2024-12-09 16:30:01,844 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-09 16:30:01,867 main DEBUG Took 0.020285 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-09 16:30:01,867 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-09 16:30:01,868 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-09 16:30:01,869 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-09 16:30:01,871 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,882 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-09 16:30:01,901 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,903 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,904 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,905 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,906 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,907 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,908 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,909 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,910 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,910 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,911 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,912 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,912 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,913 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,913 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,913 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,914 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,914 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,915 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,915 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,916 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,916 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,917 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,917 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-09 16:30:01,918 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,918 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-09 16:30:01,920 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-09 16:30:01,922 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-09 16:30:01,924 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-09 16:30:01,925 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-09 16:30:01,926 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-09 16:30:01,927 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-09 16:30:01,937 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-09 16:30:01,942 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-09 16:30:01,944 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-09 16:30:01,945 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-09 16:30:01,945 main DEBUG createAppenders(={Console})
2024-12-09 16:30:01,946 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-12-09 16:30:01,947 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-09 16:30:01,948 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-12-09 16:30:01,949 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-09 16:30:01,950 main DEBUG OutputStream closed
2024-12-09 16:30:01,952 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-09 16:30:01,952 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-09 16:30:01,952 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-12-09 16:30:02,092 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-09 16:30:02,097 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-09 16:30:02,098 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-09 16:30:02,100 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-09 16:30:02,101 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-09 16:30:02,101 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-09 16:30:02,102 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-09 16:30:02,102 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-09 16:30:02,103 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-09 16:30:02,103 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-09 16:30:02,104 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-09 16:30:02,104 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-09 16:30:02,105 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-09 16:30:02,105 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-09 16:30:02,106 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-09 16:30:02,106 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-09 16:30:02,106 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-09 16:30:02,107 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-09 16:30:02,110 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-09 16:30:02,111 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-12-09 16:30:02,111 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-09 16:30:02,112 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-12-09T16:30:02,570 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d
2024-12-09 16:30:02,574 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-09 16:30:02,576 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
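The builder calls above show Log4j2 materializing the log4j2.properties bundled in the hbase-logging test jar. A plausible reconstruction of that file in standard Log4j2 properties syntax is sketched below; the component keys (console, hbase, ...) are labels chosen here, and only the logger names, levels, appender settings, and pattern are taken from the log itself:

    # Console appender: HBaseTestAppender writing to stderr, per the builder line above
    appender.console.type = HBaseTestAppender
    appender.console.name = Console
    appender.console.target = SYSTEM_ERR
    appender.console.maxSize = 1G
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

    # Root logger: levelAndRefs="INFO,Console" in the RootLogger builder
    rootLogger = INFO,Console

    # Per-package levels from the LoggerConfig$Builder lines; the remaining
    # loggers in createLoggers(...) follow the same pattern
    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = WARN
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR
    logger.directory.name = org.apache.directory
    logger.directory.level = WARN
    logger.directory.additivity = false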
2024-12-09T16:30:02,590 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins
2024-12-09T16:30:02,687 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=889, ProcessCount=11, AvailableMemoryMB=7316
2024-12-09T16:30:02,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T16:30:02,723 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67, deleteOnExit=true
2024-12-09T16:30:02,723 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-09T16:30:02,724 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/test.cache.data in system properties and HBase conf
2024-12-09T16:30:02,725 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/hadoop.tmp.dir in system properties and HBase conf
2024-12-09T16:30:02,726 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/hadoop.log.dir in system properties and HBase conf
2024-12-09T16:30:02,727 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-09T16:30:02,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-09T16:30:02,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-09T16:30:02,846 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-09T16:30:02,997 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-09T16:30:03,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-09T16:30:03,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-09T16:30:03,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-09T16:30:03,005 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T16:30:03,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-09T16:30:03,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-09T16:30:03,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T16:30:03,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T16:30:03,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-09T16:30:03,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/nfs.dump.dir in system properties and HBase conf
2024-12-09T16:30:03,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/java.io.tmpdir in system properties and HBase conf
2024-12-09T16:30:03,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T16:30:03,011 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-09T16:30:03,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-09T16:30:03,706 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T16:30:04,582 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-09T16:30:04,695 INFO [Time-limited test {}] log.Log(170): Logging initialized @3860ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-09T16:30:04,806 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:30:04,897 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:30:04,927 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:30:04,927 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:30:04,929 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T16:30:04,955 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:30:04,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:30:04,960 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:30:05,270 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c77270f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/java.io.tmpdir/jetty-localhost-38475-hadoop-hdfs-3_4_1-tests_jar-_-any-10809752052799715014/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T16:30:05,278 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:38475}
2024-12-09T16:30:05,279 INFO [Time-limited test {}] server.Server(415): Started @4444ms
2024-12-09T16:30:05,313 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T16:30:06,221 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:30:06,236 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:30:06,238 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:30:06,239 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:30:06,239 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T16:30:06,242 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:30:06,243 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:30:06,393 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59e63bea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/java.io.tmpdir/jetty-localhost-38873-hadoop-hdfs-3_4_1-tests_jar-_-any-4944800121392202753/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:30:06,395 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:38873}
2024-12-09T16:30:06,396 INFO [Time-limited test {}] server.Server(415): Started @5561ms
2024-12-09T16:30:06,472 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T16:30:06,679 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:30:06,687 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:30:06,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:30:06,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:30:06,689 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T16:30:06,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:30:06,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:30:06,829 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55d18735{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/java.io.tmpdir/jetty-localhost-33049-hadoop-hdfs-3_4_1-tests_jar-_-any-16966622590407951143/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:30:06,830 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:33049}
2024-12-09T16:30:06,830 INFO [Time-limited test {}] server.Server(415): Started @5996ms
2024-12-09T16:30:06,834 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
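The cluster shape being brought up here is the one recorded at 16:30:02,694 above: one master, one region server, two datanodes (the two datanode Jetty servers just started), and one ZooKeeper server. As a rough illustration rather than the test's actual code, the same StartMiniClusterOption could be built and started like this, assuming the HBaseTestingUtil and StartMiniClusterOption classes named in the log:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    // Sketch: start a minicluster matching the logged StartMiniClusterOption.
    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // numMasters=1
            .numRegionServers(1)  // numRegionServers=1
            .numDataNodes(2)      // numDataNodes=2
            .numZkServers(1)      // numZkServers=1
            .build();
        util.startMiniCluster(option); // emits "Starting up minicluster with option: ..."
        try {
          // exercise the cluster here, e.g. via util.getConnection()
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }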
2024-12-09T16:30:07,821 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67/data/data4/current/BP-1356906885-172.17.0.2-1733761803868/current, will proceed with Du for space computation calculation,
2024-12-09T16:30:07,821 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67/data/data3/current/BP-1356906885-172.17.0.2-1733761803868/current, will proceed with Du for space computation calculation,
2024-12-09T16:30:07,821 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67/data/data1/current/BP-1356906885-172.17.0.2-1733761803868/current, will proceed with Du for space computation calculation,
2024-12-09T16:30:07,822 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67/data/data2/current/BP-1356906885-172.17.0.2-1733761803868/current, will proceed with Du for space computation calculation,
2024-12-09T16:30:07,878 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:30:07,880 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:30:07,947 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2f7b7527c47cf101 with lease ID 0xfb7c4f2f163a879f: Processing first storage report for DS-4c53d357-0bf9-410c-8293-00c42bd94598 from datanode DatanodeRegistration(127.0.0.1:32943, datanodeUuid=0f1f2acf-b8c3-4fbf-bc65-153178c47535, infoPort=38321, infoSecurePort=0, ipcPort=39237, storageInfo=lv=-57;cid=testClusterID;nsid=375384775;c=1733761803868)
2024-12-09T16:30:07,949 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2f7b7527c47cf101 with lease ID 0xfb7c4f2f163a879f: from storage DS-4c53d357-0bf9-410c-8293-00c42bd94598 node DatanodeRegistration(127.0.0.1:32943, datanodeUuid=0f1f2acf-b8c3-4fbf-bc65-153178c47535, infoPort=38321, infoSecurePort=0, ipcPort=39237, storageInfo=lv=-57;cid=testClusterID;nsid=375384775;c=1733761803868), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-12-09T16:30:07,950 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc80250beb198f9d1 with lease ID 0xfb7c4f2f163a87a0: Processing first storage report for DS-e1b376d1-31c9-4925-a105-b91c5c23a312 from datanode DatanodeRegistration(127.0.0.1:36725, datanodeUuid=023175ce-193e-4521-882c-21415e8da82b, infoPort=44395, infoSecurePort=0, ipcPort=34623, storageInfo=lv=-57;cid=testClusterID;nsid=375384775;c=1733761803868)
2024-12-09T16:30:07,950 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc80250beb198f9d1 with lease ID 0xfb7c4f2f163a87a0: from storage DS-e1b376d1-31c9-4925-a105-b91c5c23a312 node DatanodeRegistration(127.0.0.1:36725, datanodeUuid=023175ce-193e-4521-882c-21415e8da82b, infoPort=44395, infoSecurePort=0, ipcPort=34623, storageInfo=lv=-57;cid=testClusterID;nsid=375384775;c=1733761803868), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:30:07,951 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2f7b7527c47cf101 with lease ID 0xfb7c4f2f163a879f: Processing first storage report for DS-4d182f32-e521-4521-a690-f14b2aea1eff from datanode DatanodeRegistration(127.0.0.1:32943, datanodeUuid=0f1f2acf-b8c3-4fbf-bc65-153178c47535, infoPort=38321, infoSecurePort=0, ipcPort=39237, storageInfo=lv=-57;cid=testClusterID;nsid=375384775;c=1733761803868)
2024-12-09T16:30:07,951 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2f7b7527c47cf101 with lease ID 0xfb7c4f2f163a879f: from storage DS-4d182f32-e521-4521-a690-f14b2aea1eff node DatanodeRegistration(127.0.0.1:32943, datanodeUuid=0f1f2acf-b8c3-4fbf-bc65-153178c47535, infoPort=38321, infoSecurePort=0, ipcPort=39237, storageInfo=lv=-57;cid=testClusterID;nsid=375384775;c=1733761803868), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:30:07,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc80250beb198f9d1 with lease ID 0xfb7c4f2f163a87a0: Processing first storage report for DS-7e40c2e4-eb1c-4243-a826-68ec3d1fffb8 from datanode DatanodeRegistration(127.0.0.1:36725, datanodeUuid=023175ce-193e-4521-882c-21415e8da82b, infoPort=44395, infoSecurePort=0, ipcPort=34623, storageInfo=lv=-57;cid=testClusterID;nsid=375384775;c=1733761803868)
2024-12-09T16:30:07,954 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc80250beb198f9d1 with lease ID 0xfb7c4f2f163a87a0: from storage DS-7e40c2e4-eb1c-4243-a826-68ec3d1fffb8 node DatanodeRegistration(127.0.0.1:36725, datanodeUuid=023175ce-193e-4521-882c-21415e8da82b, infoPort=44395, infoSecurePort=0, ipcPort=34623, storageInfo=lv=-57;cid=testClusterID;nsid=375384775;c=1733761803868), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T16:30:08,005 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d
2024-12-09T16:30:08,122 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67/zookeeper_0, clientPort=63146, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T16:30:08,140 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63146
2024-12-09T16:30:08,157 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:30:08,161 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:30:08,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741825_1001 (size=7)
2024-12-09T16:30:08,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741825_1001 (size=7)
2024-12-09T16:30:08,916 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f with version=8
2024-12-09T16:30:08,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/hbase-staging
2024-12-09T16:30:09,028 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-09T16:30:09,346 INFO [Time-limited test {}] client.ConnectionUtils(128): master/dfce6ce5b10c:0 server-side Connection retries=45
2024-12-09T16:30:09,359 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:30:09,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T16:30:09,365 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T16:30:09,366 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:30:09,366 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T16:30:09,563 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-09T16:30:09,645 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-09T16:30:09,658 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-09T16:30:09,662 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T16:30:09,691 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 24031 (auto-detected)
2024-12-09T16:30:09,693 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-09T16:30:09,713 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45127
2024-12-09T16:30:09,742 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45127 connecting to ZooKeeper ensemble=127.0.0.1:63146
2024-12-09T16:30:09,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:451270x0, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T16:30:09,873 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45127-0x100bfc3b5960000 connected
2024-12-09T16:30:09,992 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:30:09,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:30:10,010 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:30:10,017 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f, hbase.cluster.distributed=false
2024-12-09T16:30:10,054 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T16:30:10,066 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45127
2024-12-09T16:30:10,067 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45127
2024-12-09T16:30:10,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45127
2024-12-09T16:30:10,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45127
2024-12-09T16:30:10,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45127
2024-12-09T16:30:10,232 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/dfce6ce5b10c:0 server-side Connection retries=45
2024-12-09T16:30:10,236 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:30:10,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T16:30:10,239 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T16:30:10,240 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:30:10,241 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T16:30:10,245 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-09T16:30:10,252 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T16:30:10,254 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46053
2024-12-09T16:30:10,257 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46053 connecting to ZooKeeper ensemble=127.0.0.1:63146
2024-12-09T16:30:10,259 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:30:10,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:30:10,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:460530x0, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T16:30:10,297 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:460530x0, quorum=127.0.0.1:63146, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:30:10,301 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46053-0x100bfc3b5960001 connected
2024-12-09T16:30:10,306 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-09T16:30:10,322 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-09T16:30:10,329 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-09T16:30:10,339 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T16:30:10,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46053
2024-12-09T16:30:10,347 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46053
2024-12-09T16:30:10,348 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46053
2024-12-09T16:30:10,349 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46053
2024-12-09T16:30:10,350 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46053
2024-12-09T16:30:10,377 DEBUG [M:0;dfce6ce5b10c:45127 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;dfce6ce5b10c:45127
2024-12-09T16:30:10,379 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/dfce6ce5b10c,45127,1733761809123
2024-12-09T16:30:10,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:30:10,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:30:10,511 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/dfce6ce5b10c,45127,1733761809123
2024-12-09T16:30:10,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-09T16:30:10,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:30:10,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:30:10,658 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-09T16:30:10,660 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/dfce6ce5b10c,45127,1733761809123 from backup master directory
2024-12-09T16:30:10,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:30:10,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/dfce6ce5b10c,45127,1733761809123
2024-12-09T16:30:10,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:30:10,805 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-09T16:30:10,805 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=dfce6ce5b10c,45127,1733761809123
2024-12-09T16:30:10,808 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-09T16:30:10,809 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-09T16:30:10,902 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/hbase.id] with ID: 1075f1bb-fd5f-42d8-9e8b-0e48cb39c8a2
2024-12-09T16:30:10,902 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/.tmp/hbase.id
2024-12-09T16:30:10,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741826_1002 (size=42)
2024-12-09T16:30:10,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741826_1002 (size=42)
2024-12-09T16:30:10,919 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/.tmp/hbase.id]:[hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/hbase.id]
2024-12-09T16:30:10,977 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:30:10,982 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-09T16:30:11,002 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms.
2024-12-09T16:30:11,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:30:11,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:30:11,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741827_1003 (size=196)
2024-12-09T16:30:11,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741827_1003 (size=196)
2024-12-09T16:30:11,060 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T16:30:11,062 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-09T16:30:11,068 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:30:11,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741828_1004 (size=1189)
2024-12-09T16:30:11,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741828_1004 (size=1189)
2024-12-09T16:30:11,541 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store
2024-12-09T16:30:11,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741829_1005 (size=34)
2024-12-09T16:30:11,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741829_1005 (size=34)
2024-12-09T16:30:11,577 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-09T16:30:11,582 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:30:11,584 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T16:30:11,584 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:30:11,584 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:30:11,587 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T16:30:11,587 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:30:11,587 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
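The 'master:store' table descriptor printed above maps directly onto HBase's public descriptor builders. Purely as an illustration (MasterRegion builds this descriptor internally), the 'info' family with the logged attributes could be reconstructed as below; the other three families differ only in the values shown in the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: rebuild the logged 'info' column family of 'master:store'.
    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .build();
      }
    }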
2024-12-09T16:30:11,589 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733761811583Disabling compacts and flushes for region at 1733761811584 (+1 ms)Disabling writes for close at 1733761811587 (+3 ms)Writing region close event to WAL at 1733761811587Closed at 1733761811587 2024-12-09T16:30:11,592 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/.initializing 2024-12-09T16:30:11,592 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/WALs/dfce6ce5b10c,45127,1733761809123 2024-12-09T16:30:11,622 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C45127%2C1733761809123, suffix=, logDir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/WALs/dfce6ce5b10c,45127,1733761809123, archiveDir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/oldWALs, maxLogs=10 2024-12-09T16:30:11,636 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C45127%2C1733761809123.1733761811629 2024-12-09T16:30:11,664 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/WALs/dfce6ce5b10c,45127,1733761809123/dfce6ce5b10c%2C45127%2C1733761809123.1733761811629 2024-12-09T16:30:11,681 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44395:44395),(127.0.0.1/127.0.0.1:38321:38321)] 2024-12-09T16:30:11,685 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:30:11,686 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:30:11,691 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,693 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,751 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,788 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T16:30:11,794 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:30:11,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:30:11,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,805 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T16:30:11,806 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:30:11,808 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:30:11,808 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T16:30:11,814 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:30:11,816 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:30:11,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,827 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T16:30:11,828 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:30:11,830 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:30:11,831 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,837 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,839 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,861 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,862 DEBUG 
[master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,867 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T16:30:11,876 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:30:11,887 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:30:11,890 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=872844, jitterRate=0.10987876355648041}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T16:30:11,902 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733761811714Initializing all the Stores at 1733761811717 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761811718 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761811719 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761811719Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761811719Cleaning up temporary data from old regions at 1733761811862 (+143 ms)Region opened successfully at 1733761811901 (+39 ms) 2024-12-09T16:30:11,906 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T16:30:11,973 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@777d8338, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0 2024-12-09T16:30:12,046 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T16:30:12,072 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T16:30:12,073 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T16:30:12,083 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T16:30:12,086 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 3 msec 2024-12-09T16:30:12,095 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-12-09T16:30:12,095 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T16:30:12,172 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T16:30:12,189 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T16:30:12,227 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T16:30:12,230 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T16:30:12,232 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T16:30:12,243 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T16:30:12,246 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T16:30:12,252 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T16:30:12,268 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T16:30:12,270 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-12-09T16:30:12,280 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T16:30:12,313 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T16:30:12,322 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T16:30:12,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:30:12,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:30:12,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:30:12,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:30:12,350 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=dfce6ce5b10c,45127,1733761809123, sessionid=0x100bfc3b5960000, setting cluster-up flag (Was=false) 2024-12-09T16:30:12,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:30:12,394 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:30:12,422 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T16:30:12,425 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,45127,1733761809123 2024-12-09T16:30:12,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:30:12,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:30:12,480 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T16:30:12,482 DEBUG 
[master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,45127,1733761809123 2024-12-09T16:30:12,490 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T16:30:12,560 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(746): ClusterId : 1075f1bb-fd5f-42d8-9e8b-0e48cb39c8a2 2024-12-09T16:30:12,562 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T16:30:12,583 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T16:30:12,584 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T16:30:12,592 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T16:30:12,599 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T16:30:12,600 DEBUG [RS:0;dfce6ce5b10c:46053 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@432c608a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0 2024-12-09T16:30:12,603 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T16:30:12,611 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T16:30:12,621 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;dfce6ce5b10c:46053 2024-12-09T16:30:12,619 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: dfce6ce5b10c,45127,1733761809123 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T16:30:12,628 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T16:30:12,629 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T16:30:12,629 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(832): About to register with Master. 
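[Annotation] The StochasticLoadBalancer "Loaded config" line above echoes four tunables (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000). A hedged sketch of overriding them, assuming the hbase.master.balancer.stochastic.* key names that current HBase releases read (verify against your version); the values simply restate what the log reports was loaded:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuning {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Key names assumed from stock HBase; values restate the log line above.
            conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
            conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
            conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
            conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        }
    }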
2024-12-09T16:30:12,630 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:30:12,630 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:30:12,631 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:30:12,631 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:30:12,631 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/dfce6ce5b10c:0, corePoolSize=10, maxPoolSize=10 2024-12-09T16:30:12,632 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,632 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:30:12,632 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,634 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(2659): reportForDuty to master=dfce6ce5b10c,45127,1733761809123 with port=46053, startcode=1733761810171 2024-12-09T16:30:12,639 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:30:12,640 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T16:30:12,645 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733761842645 2024-12-09T16:30:12,648 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T16:30:12,648 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:30:12,648 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T16:30:12,649 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T16:30:12,650 DEBUG [RS:0;dfce6ce5b10c:46053 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T16:30:12,657 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T16:30:12,657 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T16:30:12,657 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T16:30:12,658 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T16:30:12,658 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
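[Annotation] The CleanerChore lines above show the log-cleaner delegates being initialized and the LogsCleaner chore scheduled at period=600000 ms. A sketch of the two knobs most plausibly behind those numbers — both key names are assumptions from stock HBase defaults, not taken from this log, so verify them for your release:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed source of the 600000 ms chore period reported above.
            conf.setInt("hbase.master.cleaner.interval", 600_000);
            // Assumed TTL the TimeToLiveLogCleaner applies before deleting old WALs.
            conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
        }
    }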
2024-12-09T16:30:12,672 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T16:30:12,673 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T16:30:12,674 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T16:30:12,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741831_1007 (size=1321) 2024-12-09T16:30:12,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741831_1007 (size=1321) 2024-12-09T16:30:12,680 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T16:30:12,680 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T16:30:12,681 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T16:30:12,682 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f 2024-12-09T16:30:12,686 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761812682,5,FailOnTimeoutGroup] 2024-12-09T16:30:12,698 DEBUG 
[master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761812686,5,FailOnTimeoutGroup] 2024-12-09T16:30:12,698 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,698 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T16:30:12,700 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,700 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741832_1008 (size=32) 2024-12-09T16:30:12,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741832_1008 (size=32) 2024-12-09T16:30:12,712 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:30:12,716 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T16:30:12,721 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T16:30:12,721 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:30:12,722 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:30:12,723 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T16:30:12,726 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T16:30:12,727 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:30:12,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:30:12,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T16:30:12,739 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T16:30:12,739 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:30:12,741 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:30:12,742 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T16:30:12,746 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T16:30:12,746 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:30:12,749 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:30:12,749 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T16:30:12,750 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60179, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T16:30:12,758 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45127 {}] master.ServerManager(363): Checking decommissioned status of RegionServer dfce6ce5b10c,46053,1733761810171 2024-12-09T16:30:12,759 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740 2024-12-09T16:30:12,760 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740 2024-12-09T16:30:12,761 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45127 {}] master.ServerManager(517): Registering regionserver=dfce6ce5b10c,46053,1733761810171 2024-12-09T16:30:12,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T16:30:12,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T16:30:12,767 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
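[Annotation] The FlushLargeStoresPolicy fallback reported above can be checked by hand: hbase:meta has four column families (info, ns, rep_barrier, table), so memstore-flush-heap-size / # of families = 64 MB / 4 = 16 MB, matching the "(16.0 M)" in the message and the flushSizeLowerBound=16777216 logged shortly after (the earlier master:store case works out the same way: 128 MB / 4 families = 32 MB). A tiny sketch of that arithmetic; the 64 MB figure is implied by the logged result rather than stated directly:

    public class FlushLowerBound {
        public static void main(String[] args) {
            // Fallback from the log: region memstore flush heap size / number of families.
            long metaFlushHeap = 64L * 1024 * 1024; // implied by the 16.0 M result for 4 families
            int families = 4;                       // info, ns, rep_barrier, table
            System.out.println(metaFlushHeap / families); // 16777216 = flushSizeLowerBound above
        }
    }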
2024-12-09T16:30:12,782 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f 2024-12-09T16:30:12,782 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38839 2024-12-09T16:30:12,783 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T16:30:12,787 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T16:30:12,795 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:30:12,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T16:30:12,798 DEBUG [RS:0;dfce6ce5b10c:46053 {}] zookeeper.ZKUtil(111): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/dfce6ce5b10c,46053,1733761810171 2024-12-09T16:30:12,798 WARN [RS:0;dfce6ce5b10c:46053 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T16:30:12,798 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728564, jitterRate=-0.07358346879482269}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T16:30:12,798 INFO [RS:0;dfce6ce5b10c:46053 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T16:30:12,798 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171 2024-12-09T16:30:12,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733761812712Initializing all the Stores at 1733761812714 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761812715 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761812715Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE 
=> 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761812715Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761812715Cleaning up temporary data from old regions at 1733761812765 (+50 ms)Region opened successfully at 1733761812801 (+36 ms) 2024-12-09T16:30:12,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T16:30:12,802 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T16:30:12,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T16:30:12,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T16:30:12,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T16:30:12,805 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [dfce6ce5b10c,46053,1733761810171] 2024-12-09T16:30:12,807 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T16:30:12,807 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733761812801Disabling compacts and flushes for region at 1733761812801Disabling writes for close at 1733761812802 (+1 ms)Writing region close event to WAL at 1733761812806 (+4 ms)Closed at 1733761812807 (+1 ms) 2024-12-09T16:30:12,817 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:30:12,817 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T16:30:12,829 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T16:30:12,843 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T16:30:12,847 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T16:30:12,849 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T16:30:12,866 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T16:30:12,884 INFO [RS:0;dfce6ce5b10c:46053 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 
ms 2024-12-09T16:30:12,884 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,886 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T16:30:12,895 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T16:30:12,897 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,898 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,898 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,898 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,899 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,903 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,905 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:30:12,905 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,905 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,906 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,906 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,906 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,907 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:30:12,907 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:30:12,908 DEBUG [RS:0;dfce6ce5b10c:46053 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:30:12,922 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(168): Chore ScheduledChore 
name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,922 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,923 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,923 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,923 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,923 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,46053,1733761810171-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:30:12,955 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T16:30:12,958 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,46053,1733761810171-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,958 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:30:12,959 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.Replication(171): dfce6ce5b10c,46053,1733761810171 started 2024-12-09T16:30:12,999 WARN [dfce6ce5b10c:45127 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T16:30:13,001 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
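[Annotation] The MemStoreFlusher line earlier in this stream (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is internally consistent: 836 M is exactly 0.95 × 880 M, which matches the stock 0.95 default of hbase.regionserver.global.memstore.size.lower.limit (that key name is an assumption from HBase defaults, not printed in this log). A quick check:

    public class MemStoreLimits {
        public static void main(String[] args) {
            double globalLimitMb = 880.0;     // from the MemStoreFlusher log line
            double lowerLimitFraction = 0.95; // assumed default of
                                              // hbase.regionserver.global.memstore.size.lower.limit
            System.out.println(globalLimitMb * lowerLimitFraction); // 836.0 = the logged low mark
        }
    }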
2024-12-09T16:30:13,001 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1482): Serving as dfce6ce5b10c,46053,1733761810171, RpcServer on dfce6ce5b10c/172.17.0.2:46053, sessionid=0x100bfc3b5960001 2024-12-09T16:30:13,002 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T16:30:13,003 DEBUG [RS:0;dfce6ce5b10c:46053 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager dfce6ce5b10c,46053,1733761810171 2024-12-09T16:30:13,003 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,46053,1733761810171' 2024-12-09T16:30:13,003 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T16:30:13,004 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T16:30:13,005 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T16:30:13,006 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T16:30:13,006 DEBUG [RS:0;dfce6ce5b10c:46053 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager dfce6ce5b10c,46053,1733761810171 2024-12-09T16:30:13,006 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,46053,1733761810171' 2024-12-09T16:30:13,006 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T16:30:13,007 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T16:30:13,008 DEBUG [RS:0;dfce6ce5b10c:46053 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T16:30:13,008 INFO [RS:0;dfce6ce5b10c:46053 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T16:30:13,008 INFO [RS:0;dfce6ce5b10c:46053 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
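[Annotation] Both quota managers above report themselves disabled, which is the stock behavior when quota support is left off. A minimal sketch of enabling it, assuming the hbase.quota.enabled switch from stock HBase (default false, hence the two messages above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotas {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Defaults to false, which produces the "Quota support disabled" lines above.
            conf.setBoolean("hbase.quota.enabled", true);
        }
    }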
2024-12-09T16:30:13,124 INFO [RS:0;dfce6ce5b10c:46053 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C46053%2C1733761810171, suffix=, logDir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171, archiveDir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/oldWALs, maxLogs=32
2024-12-09T16:30:13,130 INFO [RS:0;dfce6ce5b10c:46053 {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C46053%2C1733761810171.1733761813129
2024-12-09T16:30:13,156 INFO [RS:0;dfce6ce5b10c:46053 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761813129
2024-12-09T16:30:13,172 DEBUG [RS:0;dfce6ce5b10c:46053 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44395:44395),(127.0.0.1/127.0.0.1:38321:38321)]
2024-12-09T16:30:13,253 DEBUG [dfce6ce5b10c:45127 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-09T16:30:13,273 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=dfce6ce5b10c,46053,1733761810171
2024-12-09T16:30:13,282 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,46053,1733761810171, state=OPENING
2024-12-09T16:30:13,326 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-09T16:30:13,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:30:13,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:30:13,336 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:30:13,338 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:30:13,341 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T16:30:13,344 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,46053,1733761810171}]
2024-12-09T16:30:13,535 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-09T16:30:13,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46955, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-09T16:30:13,561 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-09T16:30:13,562 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:30:13,567 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C46053%2C1733761810171.meta, suffix=.meta, logDir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171, archiveDir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/oldWALs, maxLogs=32
2024-12-09T16:30:13,571 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C46053%2C1733761810171.meta.1733761813570.meta
2024-12-09T16:30:13,593 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.meta.1733761813570.meta
2024-12-09T16:30:13,611 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38321:38321),(127.0.0.1/127.0.0.1:44395:44395)]
2024-12-09T16:30:13,622 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-09T16:30:13,624 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-09T16:30:13,628 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-09T16:30:13,635 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-09T16:30:13,640 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-09T16:30:13,641 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:30:13,642 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-12-09T16:30:13,642 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-12-09T16:30:13,646 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-09T16:30:13,648 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-09T16:30:13,648 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:30:13,649 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:30:13,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-09T16:30:13,653 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-09T16:30:13,653 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:30:13,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:30:13,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-09T16:30:13,659 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-09T16:30:13,660 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:30:13,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:30:13,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-09T16:30:13,664 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-09T16:30:13,664 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:30:13,667 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:30:13,667 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-09T16:30:13,670 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740
2024-12-09T16:30:13,675 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740
2024-12-09T16:30:13,679 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-09T16:30:13,679 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-09T16:30:13,681 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-09T16:30:13,686 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-09T16:30:13,689 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=790270, jitterRate=0.004881024360656738}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-09T16:30:13,689 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-12-09T16:30:13,691 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733761813642Writing region info on filesystem at 1733761813643 (+1 ms)Initializing all the Stores at 1733761813645 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761813645Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761813645Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761813645Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761813645Cleaning up temporary data from old regions at 1733761813679 (+34 ms)Running coprocessor post-open hooks at 1733761813689 (+10 ms)Region opened successfully at 1733761813691 (+2 ms)
2024-12-09T16:30:13,703 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733761813522
2024-12-09T16:30:13,721 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-09T16:30:13,723 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-12-09T16:30:13,724 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=dfce6ce5b10c,46053,1733761810171
2024-12-09T16:30:13,732 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,46053,1733761810171, state=OPEN
2024-12-09T16:30:13,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-09T16:30:13,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-09T16:30:13,839 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:30:13,839 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,46053,1733761810171
2024-12-09T16:30:13,839 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:30:13,845 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-12-09T16:30:13,846 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,46053,1733761810171 in 495 msec
2024-12-09T16:30:13,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-12-09T16:30:13,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0190 sec
2024-12-09T16:30:13,858 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T16:30:13,858 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-12-09T16:30:13,888 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-09T16:30:13,891 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,46053,1733761810171, seqNum=-1]
2024-12-09T16:30:13,922 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T16:30:13,925 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56839, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T16:30:13,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4170 sec
2024-12-09T16:30:13,957 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733761813957, completionTime=-1
2024-12-09T16:30:13,960 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-09T16:30:13,960 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-12-09T16:30:13,997 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1
2024-12-09T16:30:13,997 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733761873997
2024-12-09T16:30:13,997 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733761933997
2024-12-09T16:30:13,997 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 37 msec
2024-12-09T16:30:14,002 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45127,1733761809123-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:30:14,003 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45127,1733761809123-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:30:14,003 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45127,1733761809123-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:30:14,005 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-dfce6ce5b10c:45127, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:30:14,005 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-09T16:30:14,006 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-12-09T16:30:14,017 DEBUG [master/dfce6ce5b10c:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-12-09T16:30:14,050 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.245sec
2024-12-09T16:30:14,052 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-09T16:30:14,054 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-09T16:30:14,058 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-09T16:30:14,059 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-09T16:30:14,060 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-09T16:30:14,065 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45127,1733761809123-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-09T16:30:14,066 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45127,1733761809123-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-09T16:30:14,141 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-12-09T16:30:14,143 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-09T16:30:14,143 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45127,1733761809123-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:30:14,146 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fe8b5ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T16:30:14,149 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false
2024-12-09T16:30:14,149 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512
2024-12-09T16:30:14,154 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request dfce6ce5b10c,45127,-1 for getting cluster id
2024-12-09T16:30:14,159 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-09T16:30:14,170 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1075f1bb-fd5f-42d8-9e8b-0e48cb39c8a2'
2024-12-09T16:30:14,174 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-09T16:30:14,174 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1075f1bb-fd5f-42d8-9e8b-0e48cb39c8a2"
2024-12-09T16:30:14,174 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@698e8baf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T16:30:14,175 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [dfce6ce5b10c,45127,-1]
2024-12-09T16:30:14,178 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-12-09T16:30:14,181 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:30:14,182 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46364, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-12-09T16:30:14,186 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@128afa8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T16:30:14,187 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-09T16:30:14,197 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,46053,1733761810171, seqNum=-1]
2024-12-09T16:30:14,198 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T16:30:14,201 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44226, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T16:30:14,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=dfce6ce5b10c,45127,1733761809123
2024-12-09T16:30:14,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:30:14,246 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-12-09T16:30:14,251 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-12-09T16:30:14,258 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is dfce6ce5b10c,45127,1733761809123
2024-12-09T16:30:14,261 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@53adb2ba
2024-12-09T16:30:14,263 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-12-09T16:30:14,267 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46368, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-12-09T16:30:14,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45127 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions.
2024-12-09T16:30:14,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45127 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing.
2024-12-09T16:30:14,277 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45127 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T16:30:14,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45127 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling
2024-12-09T16:30:14,293 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION
2024-12-09T16:30:14,296 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45127 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4
2024-12-09T16:30:14,297 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:30:14,300 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-12-09T16:30:14,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T16:30:14,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741835_1011 (size=389)
2024-12-09T16:30:14,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741835_1011 (size=389)
2024-12-09T16:30:14,382 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9978dbb66d8f1739949a35bacb66b078, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f
2024-12-09T16:30:14,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741836_1012 (size=72)
2024-12-09T16:30:14,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741836_1012 (size=72)
2024-12-09T16:30:14,412 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:30:14,412 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 9978dbb66d8f1739949a35bacb66b078, disabling compactions & flushes
2024-12-09T16:30:14,412 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:30:14,413 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:30:14,413 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078. after waiting 0 ms
2024-12-09T16:30:14,413 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:30:14,413 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:30:14,413 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9978dbb66d8f1739949a35bacb66b078: Waiting for close lock at 1733761814412Disabling compacts and flushes for region at 1733761814412Disabling writes for close at 1733761814413 (+1 ms)Writing region close event to WAL at 1733761814413Closed at 1733761814413
2024-12-09T16:30:14,416 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-09T16:30:14,423 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733761814416"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733761814416"}]},"ts":"1733761814416"}
2024-12-09T16:30:14,433 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-12-09T16:30:14,435 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-09T16:30:14,440 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733761814436"}]},"ts":"1733761814436"}
2024-12-09T16:30:14,447 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta
2024-12-09T16:30:14,449 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9978dbb66d8f1739949a35bacb66b078, ASSIGN}]
2024-12-09T16:30:14,454 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9978dbb66d8f1739949a35bacb66b078, ASSIGN
2024-12-09T16:30:14,457 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9978dbb66d8f1739949a35bacb66b078, ASSIGN; state=OFFLINE, location=dfce6ce5b10c,46053,1733761810171; forceNewPlan=false, retain=false
2024-12-09T16:30:14,609 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9978dbb66d8f1739949a35bacb66b078, regionState=OPENING, regionLocation=dfce6ce5b10c,46053,1733761810171
2024-12-09T16:30:14,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9978dbb66d8f1739949a35bacb66b078, ASSIGN because future has completed
2024-12-09T16:30:14,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9978dbb66d8f1739949a35bacb66b078, server=dfce6ce5b10c,46053,1733761810171}]
2024-12-09T16:30:14,794 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:30:14,794 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9978dbb66d8f1739949a35bacb66b078, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.', STARTKEY => '', ENDKEY => ''}
2024-12-09T16:30:14,795 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:14,795 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:30:14,795 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:14,795 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:14,798 INFO [StoreOpener-9978dbb66d8f1739949a35bacb66b078-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:14,801 INFO [StoreOpener-9978dbb66d8f1739949a35bacb66b078-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9978dbb66d8f1739949a35bacb66b078 columnFamilyName info
2024-12-09T16:30:14,801 DEBUG [StoreOpener-9978dbb66d8f1739949a35bacb66b078-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:30:14,803 INFO [StoreOpener-9978dbb66d8f1739949a35bacb66b078-1 {}] regionserver.HStore(327): Store=9978dbb66d8f1739949a35bacb66b078/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T16:30:14,803 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:14,805 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:14,807 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:14,808 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:14,808 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:14,811 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:14,815 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T16:30:14,817 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9978dbb66d8f1739949a35bacb66b078; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=725047, jitterRate=-0.07805538177490234}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-09T16:30:14,817 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:14,819 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9978dbb66d8f1739949a35bacb66b078: Running coprocessor pre-open hook at 1733761814795Writing region info on filesystem at 1733761814795Initializing all the Stores at 1733761814798 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761814798Cleaning up temporary data from old regions at 1733761814808 (+10 ms)Running coprocessor post-open hooks at 1733761814817 (+9 ms)Region opened successfully at 1733761814818 (+1 ms)
2024-12-09T16:30:14,821 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078., pid=6, masterSystemTime=1733761814785
2024-12-09T16:30:14,826 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:30:14,826 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:30:14,832 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9978dbb66d8f1739949a35bacb66b078, regionState=OPEN, openSeqNum=2, regionLocation=dfce6ce5b10c,46053,1733761810171
2024-12-09T16:30:14,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9978dbb66d8f1739949a35bacb66b078, server=dfce6ce5b10c,46053,1733761810171 because future has completed
2024-12-09T16:30:14,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-12-09T16:30:14,852 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9978dbb66d8f1739949a35bacb66b078, server=dfce6ce5b10c,46053,1733761810171 in 213 msec
2024-12-09T16:30:14,856 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-12-09T16:30:14,856 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9978dbb66d8f1739949a35bacb66b078, ASSIGN in 400 msec
2024-12-09T16:30:14,859 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-09T16:30:14,859 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733761814859"}]},"ts":"1733761814859"}
2024-12-09T16:30:14,864 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta
2024-12-09T16:30:14,866 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-09T16:30:14,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 585 msec
2024-12-09T16:30:19,152 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-09T16:30:19,197 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-09T16:30:19,199 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling'
2024-12-09T16:30:19,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T16:30:19,639 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-09T16:30:19,641 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling
2024-12-09T16:30:19,641 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-09T16:30:19,642 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:30:19,642 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-09T16:30:19,642 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-09T16:30:19,642 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-09T16:30:24,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45127 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T16:30:24,383 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed
2024-12-09T16:30:24,386 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100
2024-12-09T16:30:24,395 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling
2024-12-09T16:30:24,396 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:30:24,397 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C46053%2C1733761810171.1733761824397
2024-12-09T16:30:24,407 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:24,408 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:24,408 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:24,408 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:24,408 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:24,409 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761813129 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761824397
2024-12-09T16:30:24,410 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38321:38321),(127.0.0.1/127.0.0.1:44395:44395)]
2024-12-09T16:30:24,410 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761813129 is not closed yet, will try archiving it next time
2024-12-09T16:30:24,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741833_1009 (size=451)
2024-12-09T16:30:24,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741833_1009 (size=451)
2024-12-09T16:30:24,417 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761813129 to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/oldWALs/dfce6ce5b10c%2C46053%2C1733761810171.1733761813129
2024-12-09T16:30:24,424 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078., hostname=dfce6ce5b10c,46053,1733761810171, seqNum=2]
2024-12-09T16:30:36,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46053 {}] regionserver.HRegion(8855): Flush requested on 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:36,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9978dbb66d8f1739949a35bacb66b078 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T16:30:36,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/9031a4d934f44f56b3f6017eae742c07 is 1080, key is row0001/info:/1733761824427/Put/seqid=0
2024-12-09T16:30:36,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741838_1014 (size=12509)
2024-12-09T16:30:36,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741838_1014 (size=12509)
2024-12-09T16:30:36,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/9031a4d934f44f56b3f6017eae742c07
2024-12-09T16:30:36,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/9031a4d934f44f56b3f6017eae742c07 as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/9031a4d934f44f56b3f6017eae742c07
2024-12-09T16:30:36,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/9031a4d934f44f56b3f6017eae742c07, entries=7, sequenceid=11, filesize=12.2 K
2024-12-09T16:30:36,661 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9978dbb66d8f1739949a35bacb66b078 in 196ms, sequenceid=11, compaction requested=false
2024-12-09T16:30:36,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9978dbb66d8f1739949a35bacb66b078:
2024-12-09T16:30:38,002 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-09T16:30:44,474 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C46053%2C1733761810171.1733761844474
2024-12-09T16:30:44,684 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:44,685 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:44,685 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:44,685 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:44,685 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:44,685 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:44,686 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761824397 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761844474
2024-12-09T16:30:44,690 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38321:38321),(127.0.0.1/127.0.0.1:44395:44395)]
2024-12-09T16:30:44,690 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761824397 is not closed yet, will try archiving it next time
2024-12-09T16:30:44,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741837_1013 (size=12399)
2024-12-09T16:30:44,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741837_1013 (size=12399)
2024-12-09T16:30:44,894 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:47,098 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:49,303 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:51,507 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:51,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46053 {}] regionserver.HRegion(8855): Flush requested on 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:30:51,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9978dbb66d8f1739949a35bacb66b078 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T16:30:51,710 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:51,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/7c2c4066cce849ec8ef92c3a9c471311 is 1080, key is row0008/info:/1733761838463/Put/seqid=0
2024-12-09T16:30:51,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741840_1016 (size=12509)
2024-12-09T16:30:51,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741840_1016 (size=12509)
2024-12-09T16:30:51,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/7c2c4066cce849ec8ef92c3a9c471311
2024-12-09T16:30:51,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/7c2c4066cce849ec8ef92c3a9c471311 as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/7c2c4066cce849ec8ef92c3a9c471311
2024-12-09T16:30:51,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/7c2c4066cce849ec8ef92c3a9c471311, entries=7, sequenceid=21, filesize=12.2 K
2024-12-09T16:30:52,007 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:52,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9978dbb66d8f1739949a35bacb66b078 in 500ms, sequenceid=21, compaction requested=false
2024-12-09T16:30:52,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9978dbb66d8f1739949a35bacb66b078:
2024-12-09T16:30:52,009 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K
2024-12-09T16:30:52,009 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:30:52,010 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/9031a4d934f44f56b3f6017eae742c07 because midkey is the same as first or last row
2024-12-09T16:30:53,711 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:54,993 INFO [master/dfce6ce5b10c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-09T16:30:54,993 INFO [master/dfce6ce5b10c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-09T16:30:55,915 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:55,917 WARN [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:55,918 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C46053%2C1733761810171:(num 1733761844474) roll requested
2024-12-09T16:30:55,919 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C46053%2C1733761810171.1733761855919
2024-12-09T16:30:56,127 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:56,127 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:56,127 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:56,127 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:56,128 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:56,128 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:30:56,128 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761844474 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761855919
2024-12-09T16:30:56,129 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38321:38321),(127.0.0.1/127.0.0.1:44395:44395)]
2024-12-09T16:30:56,129 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761844474 is not closed yet, will try archiving it next time
2024-12-09T16:30:56,130 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761824397 to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/oldWALs/dfce6ce5b10c%2C46053%2C1733761810171.1733761824397
2024-12-09T16:30:56,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741839_1015 (size=7739)
2024-12-09T16:30:56,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741839_1015 (size=7739)
2024-12-09T16:30:58,120 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:30:59,795 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9978dbb66d8f1739949a35bacb66b078, had cached 0 bytes from a total of 25018
2024-12-09T16:31:00,324 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:31:02,528 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:31:04,733 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:31:06,735 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-12-09T16:31:06,736 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C46053%2C1733761810171.1733761866736
2024-12-09T16:31:08,003 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-09T16:31:11,756 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5015 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:31:11,762 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5015 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:31:11,762 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C46053%2C1733761810171:(num 1733761866736) roll requested
2024-12-09T16:31:11,766 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:11,766 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:11,766 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:11,767 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:11,767 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:11,767 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761855919 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761866736
2024-12-09T16:31:11,771 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38321:38321),(127.0.0.1/127.0.0.1:44395:44395)]
2024-12-09T16:31:11,771 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761855919 is not closed yet, will try archiving it next time
2024-12-09T16:31:11,773 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C46053%2C1733761810171.1733761871772
2024-12-09T16:31:11,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741841_1017 (size=4753)
2024-12-09T16:31:11,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741841_1017 (size=4753)
2024-12-09T16:31:16,781 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:31:16,781 WARN [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:31:16,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46053 {}] regionserver.HRegion(8855): Flush requested on 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:31:16,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9978dbb66d8f1739949a35bacb66b078 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T16:31:16,795 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5015 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:31:16,795 WARN [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5015 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:31:18,783 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-12-09T16:31:21,784 INFO [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:31:21,784 WARN [FSHLog-0-hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f-prefix:dfce6ce5b10c,46053,1733761810171 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:32943,DS-4c53d357-0bf9-410c-8293-00c42bd94598,DISK], DatanodeInfoWithStorage[127.0.0.1:36725,DS-e1b376d1-31c9-4925-a105-b91c5c23a312,DISK]]
2024-12-09T16:31:21,785 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,785 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,785 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,785 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,786 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,786 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761866736 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761871772
2024-12-09T16:31:21,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741842_1018 (size=1569)
2024-12-09T16:31:21,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741842_1018 (size=1569)
2024-12-09T16:31:21,796 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44395:44395),(127.0.0.1/127.0.0.1:38321:38321)]
2024-12-09T16:31:21,796 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761866736 is not closed yet, will try archiving it next time
2024-12-09T16:31:21,797 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C46053%2C1733761810171:(num 1733761871772) roll requested
2024-12-09T16:31:21,805 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C46053%2C1733761810171.1733761881804
2024-12-09T16:31:21,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/068f415ce382443e850a03e88ffde3dc is 1080, key is row0015/info:/1733761853510/Put/seqid=0
2024-12-09T16:31:21,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741844_1020 (size=12509)
2024-12-09T16:31:21,839 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,840 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,840 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,840 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,840 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,841 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761871772 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761881804
2024-12-09T16:31:21,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741844_1020 (size=12509)
2024-12-09T16:31:21,844 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/068f415ce382443e850a03e88ffde3dc
2024-12-09T16:31:21,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741843_1019 (size=93)
2024-12-09T16:31:21,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741843_1019 (size=93)
2024-12-09T16:31:21,854 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44395:44395),(127.0.0.1/127.0.0.1:38321:38321)]
2024-12-09T16:31:21,854 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761871772 to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/oldWALs/dfce6ce5b10c%2C46053%2C1733761810171.1733761871772
2024-12-09T16:31:21,855 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C46053%2C1733761810171.1733761881855
2024-12-09T16:31:21,878 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/068f415ce382443e850a03e88ffde3dc as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/068f415ce382443e850a03e88ffde3dc
2024-12-09T16:31:21,878 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,878 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,879 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,879 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:21,879 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761881804 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/WALs/dfce6ce5b10c,46053,1733761810171/dfce6ce5b10c%2C46053%2C1733761810171.1733761881855
2024-12-09T16:31:21,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741845_1021 (size=1258)
2024-12-09T16:31:21,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741845_1021 (size=1258)
2024-12-09T16:31:21,897 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44395:44395),(127.0.0.1/127.0.0.1:38321:38321)]
2024-12-09T16:31:21,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/068f415ce382443e850a03e88ffde3dc, entries=7, sequenceid=31, filesize=12.2 K
2024-12-09T16:31:21,909 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for 9978dbb66d8f1739949a35bacb66b078 in 5128ms, sequenceid=31, compaction requested=true
2024-12-09T16:31:21,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9978dbb66d8f1739949a35bacb66b078:
2024-12-09T16:31:21,910 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K
2024-12-09T16:31:21,910 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:31:21,910 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/9031a4d934f44f56b3f6017eae742c07 because midkey is the same as first or last row
2024-12-09T16:31:21,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9978dbb66d8f1739949a35bacb66b078:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T16:31:21,916 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:31:21,917 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T16:31:21,921 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T16:31:21,923 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.HStore(1541): 9978dbb66d8f1739949a35bacb66b078/info is initiating minor compaction (all files)
2024-12-09T16:31:21,924 INFO [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9978dbb66d8f1739949a35bacb66b078/info in TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:31:21,925 INFO [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/9031a4d934f44f56b3f6017eae742c07, hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/7c2c4066cce849ec8ef92c3a9c471311, hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/068f415ce382443e850a03e88ffde3dc] into tmpdir=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp, totalSize=36.6 K
2024-12-09T16:31:21,928 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9031a4d934f44f56b3f6017eae742c07, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733761824427
2024-12-09T16:31:21,930 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7c2c4066cce849ec8ef92c3a9c471311, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733761838463
2024-12-09T16:31:21,931 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] compactions.Compactor(225): Compacting 068f415ce382443e850a03e88ffde3dc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733761853510
2024-12-09T16:31:21,998 INFO [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9978dbb66d8f1739949a35bacb66b078#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T16:31:21,999 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/52545dd01c2840aca7af022f41f962df is 1080, key is row0001/info:/1733761824427/Put/seqid=0
2024-12-09T16:31:22,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741847_1023 (size=27710)
2024-12-09T16:31:22,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741847_1023 (size=27710)
2024-12-09T16:31:22,448 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/52545dd01c2840aca7af022f41f962df as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/52545dd01c2840aca7af022f41f962df
2024-12-09T16:31:22,479 INFO [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9978dbb66d8f1739949a35bacb66b078/info of 9978dbb66d8f1739949a35bacb66b078 into 52545dd01c2840aca7af022f41f962df(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T16:31:22,480 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9978dbb66d8f1739949a35bacb66b078:
2024-12-09T16:31:22,483 INFO [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078., storeName=9978dbb66d8f1739949a35bacb66b078/info, priority=13, startTime=1733761881911; duration=0sec
2024-12-09T16:31:22,483 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K
2024-12-09T16:31:22,484 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:31:22,484 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/52545dd01c2840aca7af022f41f962df because midkey is the same as first or last row
2024-12-09T16:31:22,485 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K
2024-12-09T16:31:22,485 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:31:22,485 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/52545dd01c2840aca7af022f41f962df because midkey is the same as first or last row
2024-12-09T16:31:22,485 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K
2024-12-09T16:31:22,485 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:31:22,485 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/52545dd01c2840aca7af022f41f962df because midkey is the same as first or last row
2024-12-09T16:31:22,485 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:31:22,486 DEBUG [RS:0;dfce6ce5b10c:46053-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9978dbb66d8f1739949a35bacb66b078:info
2024-12-09T16:31:33,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46053 {}] regionserver.HRegion(8855): Flush requested on 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:31:33,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9978dbb66d8f1739949a35bacb66b078 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T16:31:33,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/e4b87b30f8e74884af1781ae51d2b25a is 1080, key is row0022/info:/1733761881856/Put/seqid=0
2024-12-09T16:31:33,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741848_1024 (size=12509)
2024-12-09T16:31:33,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741848_1024 (size=12509)
2024-12-09T16:31:33,987 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/e4b87b30f8e74884af1781ae51d2b25a
2024-12-09T16:31:34,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/e4b87b30f8e74884af1781ae51d2b25a as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/e4b87b30f8e74884af1781ae51d2b25a
2024-12-09T16:31:34,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/e4b87b30f8e74884af1781ae51d2b25a, entries=7, sequenceid=42, filesize=12.2 K
2024-12-09T16:31:34,061 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9978dbb66d8f1739949a35bacb66b078 in 139ms, sequenceid=42, compaction requested=false
2024-12-09T16:31:34,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9978dbb66d8f1739949a35bacb66b078:
2024-12-09T16:31:34,061 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K
2024-12-09T16:31:34,062 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:31:34,062 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/52545dd01c2840aca7af022f41f962df because midkey is the same as first or last row
2024-12-09T16:31:38,003 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-09T16:31:41,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T16:31:41,971 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T16:31:41,972 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:31:41,979 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:31:41,980 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:31:41,980 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T16:31:41,981 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T16:31:41,981 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1496649153, stopped=false
2024-12-09T16:31:41,981 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=dfce6ce5b10c,45127,1733761809123
2024-12-09T16:31:42,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T16:31:42,023 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T16:31:42,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:42,024 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T16:31:42,024 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:31:42,024 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:31:42,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T16:31:42,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:42,024 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'dfce6ce5b10c,46053,1733761810171' *****
2024-12-09T16:31:42,024 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T16:31:42,025 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T16:31:42,025 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:31:42,025 INFO [RS:0;dfce6ce5b10c:46053 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T16:31:42,025 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T16:31:42,025 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:31:42,026 INFO [RS:0;dfce6ce5b10c:46053 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T16:31:42,027 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(3091): Received CLOSE for 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:31:42,038 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(959): stopping server dfce6ce5b10c,46053,1733761810171
2024-12-09T16:31:42,038 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T16:31:42,039 INFO [RS:0;dfce6ce5b10c:46053 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;dfce6ce5b10c:46053.
2024-12-09T16:31:42,039 DEBUG [RS:0;dfce6ce5b10c:46053 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:31:42,039 DEBUG [RS:0;dfce6ce5b10c:46053 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:31:42,039 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T16:31:42,039 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T16:31:42,039 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T16:31:42,039 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T16:31:42,042 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9978dbb66d8f1739949a35bacb66b078, disabling compactions & flushes
2024-12-09T16:31:42,043 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:31:42,043 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:31:42,043 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078. after waiting 0 ms
2024-12-09T16:31:42,043 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:31:42,044 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 9978dbb66d8f1739949a35bacb66b078 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB
2024-12-09T16:31:42,045 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-12-09T16:31:42,045 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 9978dbb66d8f1739949a35bacb66b078=TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.}
2024-12-09T16:31:42,046 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:31:42,048 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T16:31:42,048 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T16:31:42,048 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T16:31:42,052 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T16:31:42,053 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T16:31:42,056 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB
2024-12-09T16:31:42,062 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/1fa0b2cf5bcb438dba8168294cd11b2f is 1080, key is row0029/info:/1733761895942/Put/seqid=0
2024-12-09T16:31:42,114 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/.tmp/info/eeaf9e3e36dd4fcbb7a8493bb15b5cee is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078./info:regioninfo/1733761814831/Put/seqid=0
2024-12-09T16:31:42,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741849_1025 (size=8193)
2024-12-09T16:31:42,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741849_1025 (size=8193)
2024-12-09T16:31:42,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741850_1026 (size=7016)
2024-12-09T16:31:42,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741850_1026 (size=7016)
2024-12-09T16:31:42,246 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:31:42,447 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:31:42,536 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/1fa0b2cf5bcb438dba8168294cd11b2f
2024-12-09T16:31:42,558 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/.tmp/info/1fa0b2cf5bcb438dba8168294cd11b2f as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/1fa0b2cf5bcb438dba8168294cd11b2f
2024-12-09T16:31:42,562 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/.tmp/info/eeaf9e3e36dd4fcbb7a8493bb15b5cee
2024-12-09T16:31:42,589 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/1fa0b2cf5bcb438dba8168294cd11b2f, entries=3, sequenceid=48, filesize=8.0 K
2024-12-09T16:31:42,596 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9978dbb66d8f1739949a35bacb66b078 in 553ms, sequenceid=48, compaction requested=true
2024-12-09T16:31:42,610 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/9031a4d934f44f56b3f6017eae742c07, hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/7c2c4066cce849ec8ef92c3a9c471311, hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/068f415ce382443e850a03e88ffde3dc] to archive
2024-12-09T16:31:42,622 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-09T16:31:42,630 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/.tmp/ns/1f5f12404cde4791b7ffac091909d456 is 43, key is default/ns:d/1733761813931/Put/seqid=0
2024-12-09T16:31:42,637 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/9031a4d934f44f56b3f6017eae742c07 to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/9031a4d934f44f56b3f6017eae742c07
2024-12-09T16:31:42,641 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/7c2c4066cce849ec8ef92c3a9c471311 to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/7c2c4066cce849ec8ef92c3a9c471311
2024-12-09T16:31:42,647 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9978dbb66d8f1739949a35bacb66b078
2024-12-09T16:31:42,648 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/068f415ce382443e850a03e88ffde3dc to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/info/068f415ce382443e850a03e88ffde3dc
2024-12-09T16:31:42,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741851_1027 (size=5153)
2024-12-09T16:31:42,659 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/.tmp/ns/1f5f12404cde4791b7ffac091909d456
2024-12-09T16:31:42,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741851_1027 (size=5153)
2024-12-09T16:31:42,679 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=dfce6ce5b10c:45127 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-12-09T16:31:42,691 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [9031a4d934f44f56b3f6017eae742c07=12509, 7c2c4066cce849ec8ef92c3a9c471311=12509, 068f415ce382443e850a03e88ffde3dc=12509]
2024-12-09T16:31:42,720 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/default/TestLogRolling-testSlowSyncLogRolling/9978dbb66d8f1739949a35bacb66b078/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1
2024-12-09T16:31:42,724 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:31:42,724 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9978dbb66d8f1739949a35bacb66b078: Waiting for close lock at 1733761902041Running coprocessor pre-close hooks at 1733761902042 (+1 ms)Disabling compacts and flushes for region at 1733761902042Disabling writes for close at 1733761902043 (+1 ms)Obtaining lock to block concurrent updates at 1733761902044 (+1 ms)Preparing flush snapshotting stores in 9978dbb66d8f1739949a35bacb66b078 at 1733761902044Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733761902046 (+2 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078. at 1733761902051 (+5 ms)Flushing 9978dbb66d8f1739949a35bacb66b078/info: creating writer at 1733761902051Flushing 9978dbb66d8f1739949a35bacb66b078/info: appending metadata at 1733761902061 (+10 ms)Flushing 9978dbb66d8f1739949a35bacb66b078/info: closing flushed file at 1733761902061Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17eab76b: reopening flushed file at 1733761902556 (+495 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9978dbb66d8f1739949a35bacb66b078 in 553ms, sequenceid=48, compaction requested=true at 1733761902597 (+41 ms)Writing region close event to WAL at 1733761902698 (+101 ms)Running coprocessor post-close hooks at 1733761902722 (+24 ms)Closed at 1733761902724 (+2 ms)
2024-12-09T16:31:42,725 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733761814269.9978dbb66d8f1739949a35bacb66b078.
2024-12-09T16:31:42,737 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/.tmp/table/5fd6438306ee49b492771310015557fe is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733761814859/Put/seqid=0
2024-12-09T16:31:42,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741852_1028 (size=5396)
2024-12-09T16:31:42,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741852_1028 (size=5396)
2024-12-09T16:31:42,775 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/.tmp/table/5fd6438306ee49b492771310015557fe
2024-12-09T16:31:42,803 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/.tmp/info/eeaf9e3e36dd4fcbb7a8493bb15b5cee as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/info/eeaf9e3e36dd4fcbb7a8493bb15b5cee
2024-12-09T16:31:42,824 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/info/eeaf9e3e36dd4fcbb7a8493bb15b5cee, entries=10, sequenceid=11, filesize=6.9 K
2024-12-09T16:31:42,827 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/.tmp/ns/1f5f12404cde4791b7ffac091909d456 as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/ns/1f5f12404cde4791b7ffac091909d456
2024-12-09T16:31:42,847 DEBUG [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T16:31:42,855 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/ns/1f5f12404cde4791b7ffac091909d456, entries=2, sequenceid=11, filesize=5.0 K
2024-12-09T16:31:42,858 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/.tmp/table/5fd6438306ee49b492771310015557fe as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/table/5fd6438306ee49b492771310015557fe
2024-12-09T16:31:42,909 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/table/5fd6438306ee49b492771310015557fe, entries=2, sequenceid=11, filesize=5.3 K
2024-12-09T16:31:42,913 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 858ms, sequenceid=11, compaction requested=false
2024-12-09T16:31:42,928 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T16:31:42,928 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T16:31:42,937 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-09T16:31:42,938 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:31:42,938 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T16:31:42,939 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733761902048Running coprocessor pre-close hooks at 1733761902048Disabling compacts and flushes for region at 1733761902048Disabling writes for close at 1733761902052 (+4 ms)Obtaining lock to block concurrent updates at 1733761902056 (+4 ms)Preparing flush snapshotting stores in 1588230740 at 1733761902056Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733761902056Flushing stores of hbase:meta,,1.1588230740 at 1733761902059 (+3 ms)Flushing 1588230740/info: creating writer at 1733761902060 (+1 ms)Flushing 1588230740/info: appending metadata at 1733761902113 (+53 ms)Flushing 1588230740/info: closing flushed file at 1733761902113Flushing 1588230740/ns: creating writer at 1733761902594 (+481 ms)Flushing 1588230740/ns: appending metadata at 1733761902629 (+35 ms)Flushing 1588230740/ns: closing flushed file at 1733761902629Flushing 1588230740/table: creating writer at 1733761902697 (+68 ms)Flushing 1588230740/table: appending metadata at 1733761902736 (+39 ms)Flushing 1588230740/table: closing flushed file at 1733761902737 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ea195e1: reopening flushed file at 1733761902800 (+63 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bb76019: reopening flushed file at 1733761902825 (+25 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d9ff0ab: reopening flushed file at 1733761902855 (+30 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 858ms, sequenceid=11, compaction requested=false at 1733761902914 (+59 ms)Writing region close event to WAL at 1733761902920 (+6 ms)Running coprocessor post-close hooks at 1733761902938 (+18 ms)Closed at 1733761902938
2024-12-09T16:31:42,939 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T16:31:42,943 INFO [regionserver/dfce6ce5b10c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:31:43,048 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(976): stopping server dfce6ce5b10c,46053,1733761810171; all regions closed.
2024-12-09T16:31:43,050 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:43,051 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:43,051 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:43,052 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:43,052 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:43,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741834_1010 (size=3066)
2024-12-09T16:31:43,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741834_1010 (size=3066)
2024-12-09T16:31:43,073 DEBUG [RS:0;dfce6ce5b10c:46053 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/oldWALs
2024-12-09T16:31:43,074 INFO [RS:0;dfce6ce5b10c:46053 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C46053%2C1733761810171.meta:.meta(num 1733761813570)
2024-12-09T16:31:43,075 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:43,075 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:43,075 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:43,076 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:43,076 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:43,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741846_1022 (size=13040)
2024-12-09T16:31:43,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741846_1022 (size=13040)
2024-12-09T16:31:43,101 DEBUG [RS:0;dfce6ce5b10c:46053 {}] wal.AbstractFSWAL(1256): Moved 5 WAL file(s) to /user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/oldWALs
2024-12-09T16:31:43,101 INFO [RS:0;dfce6ce5b10c:46053 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C46053%2C1733761810171:(num 1733761881855)
2024-12-09T16:31:43,101 DEBUG [RS:0;dfce6ce5b10c:46053 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:31:43,101 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:31:43,101 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:31:43,102 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.ChoreService(370): Chore service for: regionserver/dfce6ce5b10c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T16:31:43,102 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:31:43,102 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:31:43,103 INFO [RS:0;dfce6ce5b10c:46053 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46053
2024-12-09T16:31:43,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/dfce6ce5b10c,46053,1733761810171
2024-12-09T16:31:43,151 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:31:43,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T16:31:43,165 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [dfce6ce5b10c,46053,1733761810171]
2024-12-09T16:31:43,173 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/dfce6ce5b10c,46053,1733761810171 already deleted, retry=false
2024-12-09T16:31:43,173 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; dfce6ce5b10c,46053,1733761810171 expired; onlineServers=0
2024-12-09T16:31:43,173 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'dfce6ce5b10c,45127,1733761809123' *****
2024-12-09T16:31:43,173 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T16:31:43,173 INFO [M:0;dfce6ce5b10c:45127 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T16:31:43,174 INFO [M:0;dfce6ce5b10c:45127 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:31:43,174 DEBUG [M:0;dfce6ce5b10c:45127 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T16:31:43,174 DEBUG [M:0;dfce6ce5b10c:45127 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T16:31:43,174 INFO [M:0;dfce6ce5b10c:45127 {}] hbase.ChoreService(370): Chore service for: master/dfce6ce5b10c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T16:31:43,174 INFO [M:0;dfce6ce5b10c:45127 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:31:43,174 DEBUG [M:0;dfce6ce5b10c:45127 {}] master.HMaster(1795): Stopping service threads
2024-12-09T16:31:43,174 INFO [M:0;dfce6ce5b10c:45127 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T16:31:43,175 INFO [M:0;dfce6ce5b10c:45127 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T16:31:43,175 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761812686 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761812686,5,FailOnTimeoutGroup]
2024-12-09T16:31:43,175 INFO [M:0;dfce6ce5b10c:45127 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T16:31:43,175 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T16:31:43,176 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T16:31:43,177 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761812682 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761812682,5,FailOnTimeoutGroup]
2024-12-09T16:31:43,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T16:31:43,187 DEBUG [M:0;dfce6ce5b10c:45127 {}] zookeeper.ZKUtil(347): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-09T16:31:43,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:43,187 WARN [M:0;dfce6ce5b10c:45127 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-09T16:31:43,194 INFO [M:0;dfce6ce5b10c:45127 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/.lastflushedseqids
2024-12-09T16:31:43,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741853_1029 (size=130)
2024-12-09T16:31:43,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741853_1029 (size=130)
2024-12-09T16:31:43,266 INFO [RS:0;dfce6ce5b10c:46053 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:31:43,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:31:43,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46053-0x100bfc3b5960001, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:31:43,266 INFO [RS:0;dfce6ce5b10c:46053 {}] regionserver.HRegionServer(1031): Exiting; stopping=dfce6ce5b10c,46053,1733761810171; zookeeper connection closed.
2024-12-09T16:31:43,267 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@44a2a844 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@44a2a844
2024-12-09T16:31:43,267 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-09T16:31:43,269 INFO [M:0;dfce6ce5b10c:45127 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T16:31:43,269 INFO [M:0;dfce6ce5b10c:45127 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T16:31:43,269 DEBUG [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T16:31:43,269 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:31:43,269 DEBUG [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:31:43,269 DEBUG [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T16:31:43,269 DEBUG [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:31:43,269 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB
2024-12-09T16:31:43,292 DEBUG [M:0;dfce6ce5b10c:45127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4f7c3fdcc8b345ecbe4b57ba8d46be18 is 82, key is hbase:meta,,1/info:regioninfo/1733761813724/Put/seqid=0
2024-12-09T16:31:43,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741854_1030 (size=5672)
2024-12-09T16:31:43,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741854_1030 (size=5672)
2024-12-09T16:31:43,748 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4f7c3fdcc8b345ecbe4b57ba8d46be18
2024-12-09T16:31:43,823 DEBUG [M:0;dfce6ce5b10c:45127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2db48dffac7f48c6942c4138407405c2 is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733761814869/Put/seqid=0
2024-12-09T16:31:43,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741855_1031 (size=6247)
2024-12-09T16:31:43,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741855_1031 (size=6247)
2024-12-09T16:31:43,853 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2db48dffac7f48c6942c4138407405c2
2024-12-09T16:31:43,885 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2db48dffac7f48c6942c4138407405c2
2024-12-09T16:31:43,913 DEBUG [M:0;dfce6ce5b10c:45127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dc5e3d0fd26044e6b441b8df79a6c3b4 is 69, key is dfce6ce5b10c,46053,1733761810171/rs:state/1733761812764/Put/seqid=0
2024-12-09T16:31:43,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741856_1032 (size=5156)
2024-12-09T16:31:43,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741856_1032 (size=5156)
2024-12-09T16:31:43,935 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dc5e3d0fd26044e6b441b8df79a6c3b4
2024-12-09T16:31:43,995 DEBUG [M:0;dfce6ce5b10c:45127 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/52e8d37f1caa4c67bc02e880d804d544 is 52, key is load_balancer_on/state:d/1733761814241/Put/seqid=0
2024-12-09T16:31:44,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741857_1033 (size=5056)
2024-12-09T16:31:44,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741857_1033 (size=5056)
2024-12-09T16:31:44,046 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/52e8d37f1caa4c67bc02e880d804d544
2024-12-09T16:31:44,068 DEBUG [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4f7c3fdcc8b345ecbe4b57ba8d46be18 as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4f7c3fdcc8b345ecbe4b57ba8d46be18
2024-12-09T16:31:44,101 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4f7c3fdcc8b345ecbe4b57ba8d46be18, entries=8, sequenceid=59, filesize=5.5 K
2024-12-09T16:31:44,107 DEBUG [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2db48dffac7f48c6942c4138407405c2 as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2db48dffac7f48c6942c4138407405c2
2024-12-09T16:31:44,126 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2db48dffac7f48c6942c4138407405c2
2024-12-09T16:31:44,126 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2db48dffac7f48c6942c4138407405c2, entries=6, sequenceid=59, filesize=6.1 K
2024-12-09T16:31:44,134 DEBUG [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/dc5e3d0fd26044e6b441b8df79a6c3b4 as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dc5e3d0fd26044e6b441b8df79a6c3b4
2024-12-09T16:31:44,164 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/dc5e3d0fd26044e6b441b8df79a6c3b4, entries=1, sequenceid=59, filesize=5.0 K
2024-12-09T16:31:44,166 DEBUG [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/52e8d37f1caa4c67bc02e880d804d544 as hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/52e8d37f1caa4c67bc02e880d804d544
2024-12-09T16:31:44,185 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/52e8d37f1caa4c67bc02e880d804d544, entries=1, sequenceid=59, filesize=4.9 K
2024-12-09T16:31:44,188 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 919ms, sequenceid=59, compaction requested=false
2024-12-09T16:31:44,214 INFO [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:31:44,215 DEBUG [M:0;dfce6ce5b10c:45127 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733761903269Disabling compacts and flushes for region at 1733761903269Disabling writes for close at 1733761903269Obtaining lock to block concurrent updates at 1733761903269Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733761903269Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1733761903270 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733761903271 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733761903271Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733761903292 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733761903292Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733761903779 (+487 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733761903822 (+43 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733761903822Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733761903885 (+63 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733761903911 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733761903911Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733761903956 (+45 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733761903994 (+38 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733761903994Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e7d8cbf: reopening flushed file at 1733761904066 (+72 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e9bdde9: reopening flushed file at 1733761904101 (+35 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3706f66f: reopening flushed file at 1733761904127 (+26 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3cbd459f: reopening flushed file at 1733761904164 (+37 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 919ms, sequenceid=59, compaction requested=false at 1733761904188 (+24 ms)Writing region close event to WAL at 1733761904214 (+26 ms)Closed at 1733761904214
2024-12-09T16:31:44,217 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:44,217 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:44,217 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:44,217 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:44,218 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:44,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32943 is added to blk_1073741830_1006 (size=27973)
2024-12-09T16:31:44,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36725 is added to blk_1073741830_1006 (size=27973)
2024-12-09T16:31:44,236 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:31:44,237 INFO [M:0;dfce6ce5b10c:45127 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T16:31:44,237 INFO [M:0;dfce6ce5b10c:45127 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45127
2024-12-09T16:31:44,240 INFO [M:0;dfce6ce5b10c:45127 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:31:44,397 INFO [M:0;dfce6ce5b10c:45127 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:31:44,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:31:44,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45127-0x100bfc3b5960000, quorum=127.0.0.1:63146, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:31:44,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55d18735{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:31:44,442 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:31:44,442 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:31:44,442 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:31:44,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/hadoop.log.dir/,STOPPED}
2024-12-09T16:31:44,450 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:31:44,450 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:31:44,454 WARN [BP-1356906885-172.17.0.2-1733761803868 heartbeating to localhost/127.0.0.1:38839 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:31:44,454 WARN [BP-1356906885-172.17.0.2-1733761803868 heartbeating to localhost/127.0.0.1:38839 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1356906885-172.17.0.2-1733761803868 (Datanode Uuid 023175ce-193e-4521-882c-21415e8da82b) service to localhost/127.0.0.1:38839
2024-12-09T16:31:44,456 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67/data/data3/current/BP-1356906885-172.17.0.2-1733761803868 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:31:44,456 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67/data/data4/current/BP-1356906885-172.17.0.2-1733761803868 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:31:44,457 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:31:44,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59e63bea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:31:44,464 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:31:44,464 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:31:44,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:31:44,465 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/hadoop.log.dir/,STOPPED}
2024-12-09T16:31:44,471 WARN [BP-1356906885-172.17.0.2-1733761803868 heartbeating to localhost/127.0.0.1:38839 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:31:44,471 WARN [BP-1356906885-172.17.0.2-1733761803868 heartbeating to localhost/127.0.0.1:38839 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1356906885-172.17.0.2-1733761803868 (Datanode Uuid 0f1f2acf-b8c3-4fbf-bc65-153178c47535) service to localhost/127.0.0.1:38839
2024-12-09T16:31:44,472 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67/data/data1/current/BP-1356906885-172.17.0.2-1733761803868 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:31:44,472 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:31:44,472 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:31:44,473 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:31:44,474 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/cluster_f2ab7ebb-95bd-0eda-ded6-21ca768e7c67/data/data2/current/BP-1356906885-172.17.0.2-1733761803868 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:31:44,496 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c77270f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T16:31:44,496 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:31:44,497 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:31:44,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:31:44,497 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/hadoop.log.dir/,STOPPED}
2024-12-09T16:31:44,520 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T16:31:44,582 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T16:31:44,625 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=79 (was 12)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: SessionTracker
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Potentially hanging thread: nioEventLoopGroup-5-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38839
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: RpcClient-timer-pool-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-5-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38839
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-4-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HBase-Metrics2-1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)
Potentially hanging thread: region-location-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Time-limited test.named-queue-events-pool-0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-1-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: regionserver/dfce6ce5b10c:0.procedureResultReporter
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38839
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: sync.3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38839 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38839 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Monitor 
thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38839 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@609afd55 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/dfce6ce5b10c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:38839 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/dfce6ce5b10c:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38839 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=405 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=644 (was 889), ProcessCount=11 (was 11), AvailableMemoryMB=7506 (was 7316) - AvailableMemoryMB LEAK? 
2024-12-09T16:31:44,652 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=80, OpenFileDescriptor=405, MaxFileDescriptor=1048576, SystemLoadAverage=644, ProcessCount=11, AvailableMemoryMB=7501
2024-12-09T16:31:44,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T16:31:44,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/hadoop.log.dir so I do NOT create it in target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027
2024-12-09T16:31:44,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/93a9e8bd-dcbe-ed9d-2cff-3e4f1519874d/hadoop.tmp.dir so I do NOT create it in target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027
2024-12-09T16:31:44,653 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463, deleteOnExit=true
2024-12-09T16:31:44,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-09T16:31:44,653 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/test.cache.data in system properties and HBase conf
2024-12-09T16:31:44,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/hadoop.tmp.dir in system properties and HBase conf
2024-12-09T16:31:44,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/hadoop.log.dir in system properties and HBase conf
2024-12-09T16:31:44,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-09T16:31:44,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-09T16:31:44,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-09T16:31:44,654 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-09T16:31:44,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-09T16:31:44,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-09T16:31:44,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-09T16:31:44,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T16:31:44,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-09T16:31:44,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-09T16:31:44,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T16:31:44,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T16:31:44,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-09T16:31:44,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/nfs.dump.dir in system properties and HBase conf
2024-12-09T16:31:44,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/java.io.tmpdir in system properties and HBase conf
2024-12-09T16:31:44,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T16:31:44,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-09T16:31:44,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-09T16:31:44,691 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T16:31:45,093 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:31:45,103 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:31:45,105 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:31:45,105 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:31:45,105 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T16:31:45,107 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:31:45,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e09da7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:31:45,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@53521f59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:31:45,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@329eafc0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/java.io.tmpdir/jetty-localhost-36205-hadoop-hdfs-3_4_1-tests_jar-_-any-3475167725475186569/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T16:31:45,258 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@f407acd{HTTP/1.1, (http/1.1)}{localhost:36205}
2024-12-09T16:31:45,258 INFO [Time-limited test {}] server.Server(415): Started @104424ms
2024-12-09T16:31:45,274 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T16:31:45,564 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:31:45,571 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:31:45,598 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:31:45,598 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:31:45,598 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T16:31:45,605 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b2eee9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:31:45,605 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ba5815a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:31:45,751 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d914c2b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/java.io.tmpdir/jetty-localhost-33949-hadoop-hdfs-3_4_1-tests_jar-_-any-5314599578653017883/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:31:45,752 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75aebd6a{HTTP/1.1, (http/1.1)}{localhost:33949}
2024-12-09T16:31:45,752 INFO [Time-limited test {}] server.Server(415): Started @104918ms
2024-12-09T16:31:45,755 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T16:31:46,033 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:31:46,040 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:31:46,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:31:46,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:31:46,054 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T16:31:46,057 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ea4d062{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:31:46,058 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ab4268e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:31:46,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ce6afee{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/java.io.tmpdir/jetty-localhost-44003-hadoop-hdfs-3_4_1-tests_jar-_-any-4878276639145610759/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:31:46,219 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@62601d9d{HTTP/1.1, (http/1.1)}{localhost:44003}
2024-12-09T16:31:46,220 INFO [Time-limited test {}] server.Server(415): Started @105385ms
2024-12-09T16:31:46,222 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T16:31:46,825 WARN [Thread-444 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463/data/data1/current/BP-482272738-172.17.0.2-1733761904717/current, will proceed with Du for space computation calculation,
2024-12-09T16:31:46,826 WARN [Thread-445 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463/data/data2/current/BP-482272738-172.17.0.2-1733761904717/current, will proceed with Du for space computation calculation,
2024-12-09T16:31:46,868 WARN [Thread-409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:31:46,871 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x88379c9670cd555f with lease ID 0x3e11ad1ddf0406ed: Processing first storage report for DS-6a622d99-b1a2-42ad-8a76-84031eb4df88 from datanode DatanodeRegistration(127.0.0.1:37945, datanodeUuid=d09c98ff-7a5a-4ba8-8b65-1fc43172de24, infoPort=41075, infoSecurePort=0, ipcPort=38565, storageInfo=lv=-57;cid=testClusterID;nsid=728086925;c=1733761904717)
2024-12-09T16:31:46,872 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x88379c9670cd555f with lease ID 0x3e11ad1ddf0406ed: from storage DS-6a622d99-b1a2-42ad-8a76-84031eb4df88 node DatanodeRegistration(127.0.0.1:37945, datanodeUuid=d09c98ff-7a5a-4ba8-8b65-1fc43172de24, infoPort=41075, infoSecurePort=0, ipcPort=38565, storageInfo=lv=-57;cid=testClusterID;nsid=728086925;c=1733761904717), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T16:31:46,872 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x88379c9670cd555f with lease ID 0x3e11ad1ddf0406ed: Processing first storage report for DS-b5fc5f91-629e-496b-a09b-3355fb124114 from datanode DatanodeRegistration(127.0.0.1:37945, datanodeUuid=d09c98ff-7a5a-4ba8-8b65-1fc43172de24, infoPort=41075, infoSecurePort=0, ipcPort=38565, storageInfo=lv=-57;cid=testClusterID;nsid=728086925;c=1733761904717)
2024-12-09T16:31:46,872 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x88379c9670cd555f with lease ID 0x3e11ad1ddf0406ed: from storage DS-b5fc5f91-629e-496b-a09b-3355fb124114 node DatanodeRegistration(127.0.0.1:37945, datanodeUuid=d09c98ff-7a5a-4ba8-8b65-1fc43172de24, infoPort=41075, infoSecurePort=0, ipcPort=38565, storageInfo=lv=-57;cid=testClusterID;nsid=728086925;c=1733761904717), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:31:47,022 WARN [Thread-456 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463/data/data3/current/BP-482272738-172.17.0.2-1733761904717/current, will proceed with Du for space computation calculation,
2024-12-09T16:31:47,022 WARN [Thread-457 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463/data/data4/current/BP-482272738-172.17.0.2-1733761904717/current, will proceed with Du for space computation calculation,
2024-12-09T16:31:47,051 WARN [Thread-432 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:31:47,055 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb9f74dede2019229 with lease ID 0x3e11ad1ddf0406ee: Processing first storage report for DS-6b6d9f8f-ccee-40d4-bed1-05f2aca1eb78 from datanode DatanodeRegistration(127.0.0.1:42131, datanodeUuid=10892b35-845d-4bab-a16f-a7f31d57d5f8, infoPort=32967, infoSecurePort=0, ipcPort=43151, storageInfo=lv=-57;cid=testClusterID;nsid=728086925;c=1733761904717)
2024-12-09T16:31:47,055 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9f74dede2019229 with lease ID 0x3e11ad1ddf0406ee: from storage DS-6b6d9f8f-ccee-40d4-bed1-05f2aca1eb78 node DatanodeRegistration(127.0.0.1:42131, datanodeUuid=10892b35-845d-4bab-a16f-a7f31d57d5f8, infoPort=32967, infoSecurePort=0, ipcPort=43151, storageInfo=lv=-57;cid=testClusterID;nsid=728086925;c=1733761904717), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:31:47,055 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb9f74dede2019229 with lease ID 0x3e11ad1ddf0406ee: Processing first storage report for DS-7a691867-2805-4c78-84ca-5928be68c489 from datanode DatanodeRegistration(127.0.0.1:42131, datanodeUuid=10892b35-845d-4bab-a16f-a7f31d57d5f8, infoPort=32967, infoSecurePort=0, ipcPort=43151, storageInfo=lv=-57;cid=testClusterID;nsid=728086925;c=1733761904717)
2024-12-09T16:31:47,056 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb9f74dede2019229 with lease ID 0x3e11ad1ddf0406ee: from storage DS-7a691867-2805-4c78-84ca-5928be68c489 node DatanodeRegistration(127.0.0.1:42131, datanodeUuid=10892b35-845d-4bab-a16f-a7f31d57d5f8, infoPort=32967, infoSecurePort=0, ipcPort=43151, storageInfo=lv=-57;cid=testClusterID;nsid=728086925;c=1733761904717), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:31:47,086 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027
2024-12-09T16:31:47,095 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463/zookeeper_0, clientPort=59857, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T16:31:47,098 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59857
2024-12-09T16:31:47,098 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:31:47,101 INFO
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:31:47,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741825_1001 (size=7) 2024-12-09T16:31:47,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741825_1001 (size=7) 2024-12-09T16:31:47,131 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301 with version=8 2024-12-09T16:31:47,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/hbase-staging 2024-12-09T16:31:47,135 INFO [Time-limited test {}] client.ConnectionUtils(128): master/dfce6ce5b10c:0 server-side Connection retries=45 2024-12-09T16:31:47,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:31:47,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T16:31:47,135 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T16:31:47,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:31:47,135 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T16:31:47,135 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T16:31:47,136 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T16:31:47,137 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41579 2024-12-09T16:31:47,139 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41579 connecting to ZooKeeper ensemble=127.0.0.1:59857 2024-12-09T16:31:47,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:415790x0, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T16:31:47,185 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41579-0x100bfc5384c0000 connected 2024-12-09T16:31:47,267 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:31:47,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:31:47,274 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:31:47,275 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301, hbase.cluster.distributed=false 2024-12-09T16:31:47,277 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T16:31:47,281 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41579 2024-12-09T16:31:47,281 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41579 2024-12-09T16:31:47,281 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41579 2024-12-09T16:31:47,288 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41579 2024-12-09T16:31:47,289 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41579 2024-12-09T16:31:47,310 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/dfce6ce5b10c:0 server-side Connection retries=45 2024-12-09T16:31:47,310 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:31:47,311 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T16:31:47,311 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T16:31:47,311 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:31:47,311 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T16:31:47,311 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T16:31:47,311 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T16:31:47,312 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36171 2024-12-09T16:31:47,315 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36171 connecting to ZooKeeper ensemble=127.0.0.1:59857 2024-12-09T16:31:47,317 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:31:47,321 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:31:47,343 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:361710x0, quorum=127.0.0.1:59857, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:31:47,343 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T16:31:47,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:361710x0, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T16:31:47,353 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36171-0x100bfc5384c0001 connected 2024-12-09T16:31:47,354 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T16:31:47,355 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T16:31:47,358 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T16:31:47,372 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36171 2024-12-09T16:31:47,373 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36171 2024-12-09T16:31:47,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36171 2024-12-09T16:31:47,375 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36171 2024-12-09T16:31:47,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36171 2024-12-09T16:31:47,401 DEBUG [M:0;dfce6ce5b10c:41579 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;dfce6ce5b10c:41579 2024-12-09T16:31:47,403 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/dfce6ce5b10c,41579,1733761907134 2024-12-09T16:31:47,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:31:47,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:31:47,414 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/dfce6ce5b10c,41579,1733761907134 2024-12-09T16:31:47,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T16:31:47,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:31:47,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:31:47,426 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T16:31:47,442 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/dfce6ce5b10c,41579,1733761907134 from backup master directory 2024-12-09T16:31:47,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/dfce6ce5b10c,41579,1733761907134 2024-12-09T16:31:47,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:31:47,450 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T16:31:47,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:31:47,450 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=dfce6ce5b10c,41579,1733761907134 2024-12-09T16:31:47,458 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/hbase.id] with ID: 7a4a09f6-8135-45a6-8dfa-0d9473b79e48 2024-12-09T16:31:47,458 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/.tmp/hbase.id 2024-12-09T16:31:47,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741826_1002 (size=42) 2024-12-09T16:31:47,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741826_1002 (size=42) 2024-12-09T16:31:47,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:31:47,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:31:47,881 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/.tmp/hbase.id]:[hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/hbase.id] 2024-12-09T16:31:47,906 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:31:47,906 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T16:31:47,909 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 2024-12-09T16:31:47,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:31:47,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:31:47,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741827_1003 (size=196) 2024-12-09T16:31:47,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741827_1003 (size=196) 2024-12-09T16:31:47,969 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 
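The master:store descriptor dumped in the preceding entry is the same shape the public client API builds. A minimal sketch of constructing an equivalent column family with TableDescriptorBuilder/ColumnFamilyDescriptorBuilder, mirroring only the 'info' family attributes printed above; the table name "example" is illustrative, not the internal master-store table:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        // Mirrors the 'info' family as logged: VERSIONS => '3',
        // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL',
        // IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)'.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example")) // name is hypothetical
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .build())
            .build();
      }
    }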
2024-12-09T16:31:47,970 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T16:31:47,970 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T16:31:47,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741828_1004 (size=1189) 2024-12-09T16:31:47,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741828_1004 (size=1189) 2024-12-09T16:31:47,980 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store 2024-12-09T16:31:47,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741829_1005 (size=34) 2024-12-09T16:31:47,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741829_1005 (size=34) 2024-12-09T16:31:47,994 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:31:47,995 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T16:31:47,995 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T16:31:47,995 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:31:47,995 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T16:31:47,995 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:31:47,995 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:31:47,995 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733761907994Disabling compacts and flushes for region at 1733761907994Disabling writes for close at 1733761907995 (+1 ms)Writing region close event to WAL at 1733761907995Closed at 1733761907995 2024-12-09T16:31:47,996 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/.initializing 2024-12-09T16:31:47,996 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/WALs/dfce6ce5b10c,41579,1733761907134 2024-12-09T16:31:48,000 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C41579%2C1733761907134, suffix=, logDir=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/WALs/dfce6ce5b10c,41579,1733761907134, archiveDir=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/oldWALs, maxLogs=10 2024-12-09T16:31:48,001 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C41579%2C1733761907134.1733761908000 2024-12-09T16:31:48,008 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/WALs/dfce6ce5b10c,41579,1733761907134/dfce6ce5b10c%2C41579%2C1733761907134.1733761908000 2024-12-09T16:31:48,014 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41075:41075),(127.0.0.1/127.0.0.1:32967:32967)] 2024-12-09T16:31:48,017 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:31:48,018 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:31:48,018 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,018 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] 
regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,020 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,022 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T16:31:48,023 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:31:48,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:31:48,024 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,032 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T16:31:48,032 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:31:48,033 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:31:48,033 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,036 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T16:31:48,037 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:31:48,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:31:48,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,041 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T16:31:48,041 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:31:48,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:31:48,042 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,044 DEBUG 
[master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,045 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,047 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,047 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,048 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T16:31:48,049 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:31:48,055 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:31:48,056 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730419, jitterRate=-0.07122528553009033}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T16:31:48,057 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733761908018Initializing all the Stores at 1733761908020 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761908020Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761908020Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761908020Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761908020Cleaning up temporary data from old regions at 1733761908047 (+27 ms)Region opened successfully at 1733761908057 (+10 ms) 2024-12-09T16:31:48,057 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T16:31:48,063 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1610ec66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0 2024-12-09T16:31:48,064 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T16:31:48,064 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T16:31:48,064 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T16:31:48,064 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T16:31:48,066 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T16:31:48,066 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T16:31:48,066 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T16:31:48,073 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
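Everything from the DataNode startup through this master bootstrap is driven by the test harness the log itself names (hbase.HBaseTestingUtil). A minimal sketch of the kind of test that produces such a log, assuming JUnit 4 and the usual HBaseTestingUtil helpers (startMiniCluster/createTable/shutdownMiniCluster); the test class and table name are illustrative:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class MiniClusterSketch {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        // Spins up MiniDFSCluster, MiniZooKeeperCluster, one master and one
        // regionserver -- the startup sequence logged above.
        UTIL.startMiniCluster();
      }

      @AfterClass
      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster();
      }

      @Test
      public void testSomething() throws Exception {
        Table table = UTIL.createTable(TableName.valueOf("t"), "cf"); // names hypothetical
        // ... exercise the cluster ...
        table.close();
      }
    }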
2024-12-09T16:31:48,075 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T16:31:48,085 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T16:31:48,086 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T16:31:48,087 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T16:31:48,096 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T16:31:48,097 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T16:31:48,098 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T16:31:48,107 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T16:31:48,109 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T16:31:48,116 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T16:31:48,118 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T16:31:48,129 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T16:31:48,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:31:48,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:31:48,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:31:48,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-09T16:31:48,142 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=dfce6ce5b10c,41579,1733761907134, sessionid=0x100bfc5384c0000, setting cluster-up flag (Was=false) 2024-12-09T16:31:48,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:31:48,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:31:48,183 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T16:31:48,184 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,41579,1733761907134 2024-12-09T16:31:48,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:31:48,204 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:31:48,230 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T16:31:48,232 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,41579,1733761907134 2024-12-09T16:31:48,234 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T16:31:48,236 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T16:31:48,236 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T16:31:48,236 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
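The balancer parameters echoed in the StochasticLoadBalancer line above (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000) correspond to configuration keys. A hedged sketch of setting them on a Configuration; the key names follow the documented hbase.master.balancer.stochastic.* convention and are worth verifying against the exact HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerConfigSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Values match what the balancer reports at startup in the log.
        conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        return conf;
      }
    }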
2024-12-09T16:31:48,237 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: dfce6ce5b10c,41579,1733761907134 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T16:31:48,238 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:31:48,239 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:31:48,239 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:31:48,239 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:31:48,239 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/dfce6ce5b10c:0, corePoolSize=10, maxPoolSize=10 2024-12-09T16:31:48,239 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:48,239 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:31:48,239 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:48,240 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733761938240 2024-12-09T16:31:48,240 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T16:31:48,240 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T16:31:48,240 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T16:31:48,240 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T16:31:48,240 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T16:31:48,241 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T16:31:48,241 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:48,241 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T16:31:48,241 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T16:31:48,241 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T16:31:48,241 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:31:48,242 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T16:31:48,242 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T16:31:48,242 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T16:31:48,242 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761908242,5,FailOnTimeoutGroup] 2024-12-09T16:31:48,242 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761908242,5,FailOnTimeoutGroup] 2024-12-09T16:31:48,242 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:48,242 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T16:31:48,242 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:48,242 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
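The ChoreService entries above (LogsCleaner, HFileCleaner, ReplicationBarrierCleaner, SnapshotCleaner) are periodic tasks scheduled on HBase's public ScheduledChore API. A minimal sketch of a custom chore on the same API, under the assumption that the three-argument ScheduledChore(name, stopper, period) constructor is used; the chore name and body are hypothetical:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      static class HeartbeatChore extends ScheduledChore {
        HeartbeatChore(Stoppable stopper) {
          // period=600000 ms, like the LogsCleaner/HFileCleaner chores above
          super("HeartbeatChore", stopper, 600_000);
        }

        @Override
        protected void chore() {
          System.out.println("periodic work"); // placeholder body
        }
      }

      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("sketch");
        service.scheduleChore(new HeartbeatChore(stopper));
      }
    }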
2024-12-09T16:31:48,243 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:48,243 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-09T16:31:48,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741831_1007 (size=1321)
2024-12-09T16:31:48,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741831_1007 (size=1321)
2024-12-09T16:31:48,255 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-12-09T16:31:48,255 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301
2024-12-09T16:31:48,268 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T16:31:48,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:31:48,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741832_1008 (size=32)
2024-12-09T16:31:48,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741832_1008 (size=32)
2024-12-09T16:31:48,274 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:31:48,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-09T16:31:48,282 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-09T16:31:48,282 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:48,283 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:48,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-09T16:31:48,287 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-09T16:31:48,287 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:48,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:48,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-09T16:31:48,291 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-09T16:31:48,291 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:48,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:48,293 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-09T16:31:48,294 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(746): ClusterId : 7a4a09f6-8135-45a6-8dfa-0d9473b79e48
2024-12-09T16:31:48,294 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-09T16:31:48,299 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-09T16:31:48,300 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:48,302 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:48,303 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-09T16:31:48,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:31:48,304 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/1588230740
2024-12-09T16:31:48,305 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/1588230740
2024-12-09T16:31:48,307 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-09T16:31:48,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-09T16:31:48,307 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-09T16:31:48,307 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-09T16:31:48,308 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-09T16:31:48,311 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-09T16:31:48,317 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T16:31:48,319 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=840306, jitterRate=0.06850437819957733}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-09T16:31:48,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733761908274Initializing all the Stores at 1733761908277 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761908277Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761908279 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761908279Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761908279Cleaning up temporary data from old regions at 1733761908307 (+28 ms)Region opened successfully at 1733761908320 (+13 ms)
2024-12-09T16:31:48,320 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T16:31:48,320 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T16:31:48,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T16:31:48,321 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-09T16:31:48,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T16:31:48,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T16:31:48,321 DEBUG [RS:0;dfce6ce5b10c:36171 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15b774c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0
2024-12-09T16:31:48,323 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T16:31:48,323 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733761908320Disabling compacts and flushes for region at 1733761908320Disabling writes for close at 1733761908321 (+1 ms)Writing region close event to WAL at 1733761908322 (+1 ms)Closed at 1733761908322
2024-12-09T16:31:48,328 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T16:31:48,328 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-12-09T16:31:48,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-09T16:31:48,334 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T16:31:48,338 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-09T16:31:48,342 DEBUG [RS:0;dfce6ce5b10c:36171 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;dfce6ce5b10c:36171
2024-12-09T16:31:48,342 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-12-09T16:31:48,343 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-12-09T16:31:48,343 DEBUG [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(832): About to register with Master.
2024-12-09T16:31:48,344 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(2659): reportForDuty to master=dfce6ce5b10c,41579,1733761907134 with port=36171, startcode=1733761907310
2024-12-09T16:31:48,344 DEBUG [RS:0;dfce6ce5b10c:36171 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-09T16:31:48,348 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35527, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-12-09T16:31:48,349 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41579 {}] master.ServerManager(363): Checking decommissioned status of RegionServer dfce6ce5b10c,36171,1733761907310
2024-12-09T16:31:48,350 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41579 {}] master.ServerManager(517): Registering regionserver=dfce6ce5b10c,36171,1733761907310
2024-12-09T16:31:48,354 DEBUG [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301
2024-12-09T16:31:48,355 DEBUG [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34925
2024-12-09T16:31:48,358 DEBUG [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-12-09T16:31:48,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T16:31:48,368 DEBUG [RS:0;dfce6ce5b10c:36171 {}] zookeeper.ZKUtil(111): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/dfce6ce5b10c,36171,1733761907310
2024-12-09T16:31:48,368 WARN [RS:0;dfce6ce5b10c:36171 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-09T16:31:48,368 INFO [RS:0;dfce6ce5b10c:36171 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:31:48,369 DEBUG [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/WALs/dfce6ce5b10c,36171,1733761907310
2024-12-09T16:31:48,369 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [dfce6ce5b10c,36171,1733761907310]
2024-12-09T16:31:48,381 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-09T16:31:48,386 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-09T16:31:48,390 INFO [RS:0;dfce6ce5b10c:36171 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-09T16:31:48,390 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:48,398 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-12-09T16:31:48,399 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-12-09T16:31:48,400 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:48,400 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:48,400 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:48,400 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:48,400 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:48,401 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:48,401 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2
2024-12-09T16:31:48,401 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:48,401 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:48,401 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:48,401 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:48,401 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:48,402 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:48,402 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3
2024-12-09T16:31:48,402 DEBUG [RS:0;dfce6ce5b10c:36171 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3
2024-12-09T16:31:48,407 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:48,408 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:48,408 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:48,408 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:48,408 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:48,408 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,36171,1733761907310-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-09T16:31:48,437 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-09T16:31:48,438 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,36171,1733761907310-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:48,438 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:48,439 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.Replication(171): dfce6ce5b10c,36171,1733761907310 started
2024-12-09T16:31:48,466 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:48,467 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(1482): Serving as dfce6ce5b10c,36171,1733761907310, RpcServer on dfce6ce5b10c/172.17.0.2:36171, sessionid=0x100bfc5384c0001
2024-12-09T16:31:48,468 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-09T16:31:48,468 DEBUG [RS:0;dfce6ce5b10c:36171 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager dfce6ce5b10c,36171,1733761907310
2024-12-09T16:31:48,468 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,36171,1733761907310'
2024-12-09T16:31:48,468 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-09T16:31:48,469 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-09T16:31:48,475 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-09T16:31:48,475 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-09T16:31:48,475 DEBUG [RS:0;dfce6ce5b10c:36171 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager dfce6ce5b10c,36171,1733761907310
2024-12-09T16:31:48,475 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,36171,1733761907310'
2024-12-09T16:31:48,476 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-09T16:31:48,477 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-09T16:31:48,483 DEBUG [RS:0;dfce6ce5b10c:36171 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-09T16:31:48,483 INFO [RS:0;dfce6ce5b10c:36171 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-09T16:31:48,483 INFO [RS:0;dfce6ce5b10c:36171 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-09T16:31:48,488 WARN [dfce6ce5b10c:41579 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-12-09T16:31:48,592 INFO [RS:0;dfce6ce5b10c:36171 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C36171%2C1733761907310, suffix=, logDir=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/WALs/dfce6ce5b10c,36171,1733761907310, archiveDir=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/oldWALs, maxLogs=32
2024-12-09T16:31:48,595 INFO [RS:0;dfce6ce5b10c:36171 {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C36171%2C1733761907310.1733761908594
2024-12-09T16:31:48,688 INFO [RS:0;dfce6ce5b10c:36171 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/WALs/dfce6ce5b10c,36171,1733761907310/dfce6ce5b10c%2C36171%2C1733761907310.1733761908594
2024-12-09T16:31:48,770 DEBUG [RS:0;dfce6ce5b10c:36171 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:32967:32967),(127.0.0.1/127.0.0.1:41075:41075)]
2024-12-09T16:31:48,989 DEBUG [dfce6ce5b10c:41579 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-09T16:31:48,989 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=dfce6ce5b10c,36171,1733761907310
2024-12-09T16:31:48,991 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,36171,1733761907310, state=OPENING
2024-12-09T16:31:49,029 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-09T16:31:49,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:49,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:49,038 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:31:49,039 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:31:49,039 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T16:31:49,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,36171,1733761907310}]
2024-12-09T16:31:49,194 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-09T16:31:49,197 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50859, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-09T16:31:49,203 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-09T16:31:49,203 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:31:49,207 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C36171%2C1733761907310.meta, suffix=.meta, logDir=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/WALs/dfce6ce5b10c,36171,1733761907310, archiveDir=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/oldWALs, maxLogs=32
2024-12-09T16:31:49,208 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C36171%2C1733761907310.meta.1733761909208.meta
2024-12-09T16:31:49,216 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/WALs/dfce6ce5b10c,36171,1733761907310/dfce6ce5b10c%2C36171%2C1733761907310.meta.1733761909208.meta
2024-12-09T16:31:49,217 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41075:41075),(127.0.0.1/127.0.0.1:32967:32967)]
2024-12-09T16:31:49,217 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-09T16:31:49,218 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-09T16:31:49,218 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-09T16:31:49,218 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-09T16:31:49,218 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-09T16:31:49,218 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:31:49,218 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-12-09T16:31:49,218 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-12-09T16:31:49,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-09T16:31:49,221 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-09T16:31:49,221 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:49,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:49,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-09T16:31:49,223 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-09T16:31:49,223 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:49,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:49,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-09T16:31:49,225 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-09T16:31:49,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:49,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:49,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-09T16:31:49,227 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-09T16:31:49,228 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:49,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:49,228 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-09T16:31:49,230 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/1588230740
2024-12-09T16:31:49,232 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/1588230740
2024-12-09T16:31:49,234 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-09T16:31:49,234 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-09T16:31:49,235 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-09T16:31:49,237 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-09T16:31:49,239 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=779622, jitterRate=-0.008659839630126953}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-09T16:31:49,239 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740
2024-12-09T16:31:49,241 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733761909218Writing region info on filesystem at 1733761909219 (+1 ms)Initializing all the Stores at 1733761909220 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761909220Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761909220Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761909220Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761909220Cleaning up temporary data from old regions at 1733761909234 (+14 ms)Running coprocessor post-open hooks at 1733761909239 (+5 ms)Region opened successfully at 1733761909241 (+2 ms)
2024-12-09T16:31:49,242 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733761909194
2024-12-09T16:31:49,246 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740
2024-12-09T16:31:49,246 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740
2024-12-09T16:31:49,247 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=dfce6ce5b10c,36171,1733761907310
2024-12-09T16:31:49,249 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,36171,1733761907310, state=OPEN
2024-12-09T16:31:49,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-09T16:31:49,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server
2024-12-09T16:31:49,310 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,36171,1733761907310
2024-12-09T16:31:49,310 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:31:49,310 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:31:49,315 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2
2024-12-09T16:31:49,315 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,36171,1733761907310 in 271 msec
2024-12-09T16:31:49,319 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1
2024-12-09T16:31:49,319 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 987 msec
2024-12-09T16:31:49,321 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T16:31:49,321 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces
2024-12-09T16:31:49,323 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-09T16:31:49,323 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,36171,1733761907310, seqNum=-1]
2024-12-09T16:31:49,324 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T16:31:49,326 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51725, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T16:31:49,335 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0980 sec
2024-12-09T16:31:49,335 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733761909335, completionTime=-1
2024-12-09T16:31:49,336 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running
2024-12-09T16:31:49,336 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster...
2024-12-09T16:31:49,338 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1
2024-12-09T16:31:49,338 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733761969338
2024-12-09T16:31:49,338 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733762029338
2024-12-09T16:31:49,338 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec
2024-12-09T16:31:49,339 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,41579,1733761907134-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:49,339 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,41579,1733761907134-BalancerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:49,339 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,41579,1733761907134-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:49,339 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-dfce6ce5b10c:41579, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:49,339 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:49,339 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:49,341 DEBUG [master/dfce6ce5b10c:0.Chore.1 {}] janitor.CatalogJanitor(180):
2024-12-09T16:31:49,346 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.895sec
2024-12-09T16:31:49,346 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled
2024-12-09T16:31:49,346 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting.
2024-12-09T16:31:49,346 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting.
2024-12-09T16:31:49,346 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting.
2024-12-09T16:31:49,346 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-09T16:31:49,346 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,41579,1733761907134-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-09T16:31:49,346 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,41579,1733761907134-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-09T16:31:49,350 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-12-09T16:31:49,350 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-09T16:31:49,350 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,41579,1733761907134-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:49,395 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dc6671d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T16:31:49,396 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request dfce6ce5b10c,41579,-1 for getting cluster id
2024-12-09T16:31:49,396 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-09T16:31:49,398 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7a4a09f6-8135-45a6-8dfa-0d9473b79e48'
2024-12-09T16:31:49,399 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-09T16:31:49,399 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7a4a09f6-8135-45a6-8dfa-0d9473b79e48"
2024-12-09T16:31:49,400 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52012dd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T16:31:49,400 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [dfce6ce5b10c,41579,-1]
2024-12-09T16:31:49,401 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-12-09T16:31:49,402 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:31:49,404 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51328, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-12-09T16:31:49,406 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@549bc3ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T16:31:49,406 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-09T16:31:49,410 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,36171,1733761907310, seqNum=-1]
2024-12-09T16:31:49,410 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T16:31:49,413 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56146, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T16:31:49,417 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=dfce6ce5b10c,41579,1733761907134
2024-12-09T16:31:49,418 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:31:49,423 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-12-09T16:31:49,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T16:31:49,423 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T16:31:49,423 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:31:49,423 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:31:49,423 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:31:49,423 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T16:31:49,423 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T16:31:49,424 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=477059495, stopped=false
2024-12-09T16:31:49,424 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=dfce6ce5b10c,41579,1733761907134
2024-12-09T16:31:49,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T16:31:49,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T16:31:49,446 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T16:31:49,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:49,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:49,447 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:31:49,447 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:31:49,447 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
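[editor's note] The call stack above pins down where this shutdown originates: the test's tearDown (AbstractTestLogRolling.java:163) calls HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection. A minimal sketch of that JUnit 4 teardown shape; the class and test names here are hypothetical, but the utility calls are the ones in the trace:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class ExampleLogRollingTest {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        testUtil.startMiniCluster(); // ZK + HDFS + master + region server, as in the startup lines earlier
      }

      @After
      public void tearDown() throws Exception {
        // Emits "Shutting down minicluster", closes the connection (the call stack above), then the cluster.
        testUtil.shutdownMiniCluster();
      }

      @Test
      public void testSomething() throws Exception {
        // exercise the cluster here
      }
    }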
2024-12-09T16:31:49,447 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:31:49,447 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:31:49,447 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'dfce6ce5b10c,36171,1733761907310' *****
2024-12-09T16:31:49,448 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T16:31:49,448 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T16:31:49,448 INFO [RS:0;dfce6ce5b10c:36171 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T16:31:49,448 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T16:31:49,448 INFO [RS:0;dfce6ce5b10c:36171 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T16:31:49,448 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(959): stopping server dfce6ce5b10c,36171,1733761907310
2024-12-09T16:31:49,448 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T16:31:49,448 INFO [RS:0;dfce6ce5b10c:36171 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;dfce6ce5b10c:36171.
2024-12-09T16:31:49,448 DEBUG [RS:0;dfce6ce5b10c:36171 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:31:49,448 DEBUG [RS:0;dfce6ce5b10c:36171 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:31:49,449 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T16:31:49,449 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T16:31:49,449 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T16:31:49,449 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T16:31:49,449 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-09T16:31:49,449 DEBUG [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-12-09T16:31:49,450 DEBUG [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T16:31:49,450 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T16:31:49,450 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T16:31:49,450 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T16:31:49,450 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T16:31:49,450 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T16:31:49,450 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB
2024-12-09T16:31:49,475 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/1588230740/.tmp/ns/a38d4898b0544a479120287417e8e235 is 43, key is default/ns:d/1733761909326/Put/seqid=0
2024-12-09T16:31:49,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741835_1011 (size=5153)
2024-12-09T16:31:49,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741835_1011 (size=5153)
2024-12-09T16:31:49,483 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/1588230740/.tmp/ns/a38d4898b0544a479120287417e8e235
2024-12-09T16:31:49,495 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/1588230740/.tmp/ns/a38d4898b0544a479120287417e8e235 as hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/1588230740/ns/a38d4898b0544a479120287417e8e235
2024-12-09T16:31:49,505 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/1588230740/ns/a38d4898b0544a479120287417e8e235, entries=2, sequenceid=6, filesize=5.0 K
2024-12-09T16:31:49,507 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 57ms, sequenceid=6, compaction requested=false
2024-12-09T16:31:49,507 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-09T16:31:49,525 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-09T16:31:49,526 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:31:49,526 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T16:31:49,527 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733761909449Running coprocessor pre-close hooks at 1733761909449Disabling compacts and flushes for region at 1733761909449Disabling writes for close at 1733761909450 (+1 ms)Obtaining lock to block concurrent updates at 1733761909450Preparing flush snapshotting stores in 1588230740 at 1733761909450Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733761909451 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733761909452 (+1 ms)Flushing 1588230740/ns: creating writer at 1733761909452Flushing 1588230740/ns: appending metadata at 1733761909474 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1733761909474Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5da098d: reopening flushed file at 1733761909492 (+18 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 57ms, sequenceid=6, compaction requested=false at 1733761909507 (+15 ms)Writing region close event to WAL at 1733761909518 (+11 ms)Running coprocessor post-close hooks at 1733761909526 (+8 ms)Closed at 1733761909526
2024-12-09T16:31:49,527 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T16:31:49,638 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:31:49,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T16:31:49,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling
2024-12-09T16:31:49,650 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(976): stopping server dfce6ce5b10c,36171,1733761907310; all regions closed.
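[editor's note] The records above show a memstore flush performed while hbase:meta closes: data is written to a .tmp HFile, committed into the ns family directory, and the region close journal then summarizes each step. The same flush mechanics can be driven from a client through the public Admin API; a minimal sketch, assuming default configuration on the classpath:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flush hbase:meta on demand; the server writes a .tmp HFile and commits it, as logged above.
          admin.flush(TableName.META_TABLE_NAME);
        }
      }
    }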
2024-12-09T16:31:49,650 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:49,651 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:49,651 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:49,651 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:49,651 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:49,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741834_1010 (size=1152)
2024-12-09T16:31:49,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741834_1010 (size=1152)
2024-12-09T16:31:49,660 DEBUG [RS:0;dfce6ce5b10c:36171 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/oldWALs
2024-12-09T16:31:49,660 INFO [RS:0;dfce6ce5b10c:36171 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C36171%2C1733761907310.meta:.meta(num 1733761909208)
2024-12-09T16:31:49,661 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:49,661 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:49,661 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:49,661 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:49,661 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:49,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741833_1009 (size=93)
2024-12-09T16:31:49,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741833_1009 (size=93)
2024-12-09T16:31:49,667 DEBUG [RS:0;dfce6ce5b10c:36171 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/oldWALs
2024-12-09T16:31:49,667 INFO [RS:0;dfce6ce5b10c:36171 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C36171%2C1733761907310:(num 1733761908594)
2024-12-09T16:31:49,667 DEBUG [RS:0;dfce6ce5b10c:36171 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:31:49,667 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:31:49,667 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:31:49,667 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.ChoreService(370): Chore service for: regionserver/dfce6ce5b10c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T16:31:49,667 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:31:49,667 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
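[editor's note] Here the region server's WALs are closed and archived to oldWALs, the tail end of the log rolling this test exercises. A roll can also be requested explicitly through the client Admin API; a minimal sketch using the public Admin methods (connection defaults assumed):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WalRollSketch {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          for (ServerName sn : admin.getRegionServers()) {
            admin.rollWALWriter(sn); // the server closes its current WAL and opens a new one
          }
        }
      }
    }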
2024-12-09T16:31:49,668 INFO [RS:0;dfce6ce5b10c:36171 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36171
2024-12-09T16:31:49,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T16:31:49,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/dfce6ce5b10c,36171,1733761907310
2024-12-09T16:31:49,679 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:31:49,687 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [dfce6ce5b10c,36171,1733761907310]
2024-12-09T16:31:49,696 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/dfce6ce5b10c,36171,1733761907310 already deleted, retry=false
2024-12-09T16:31:49,696 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; dfce6ce5b10c,36171,1733761907310 expired; onlineServers=0
2024-12-09T16:31:49,696 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'dfce6ce5b10c,41579,1733761907134' *****
2024-12-09T16:31:49,696 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T16:31:49,696 INFO [M:0;dfce6ce5b10c:41579 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T16:31:49,696 INFO [M:0;dfce6ce5b10c:41579 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:31:49,696 DEBUG [M:0;dfce6ce5b10c:41579 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T16:31:49,696 DEBUG [M:0;dfce6ce5b10c:41579 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T16:31:49,696 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
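[editor's note] The master notices the region server's exit through ZooKeeper: the server's ephemeral znode under /hbase/rs disappears and RegionServerTracker processes the expiration. A minimal sketch of the same ephemeral-node watch using the plain ZooKeeper client; the quorum string and znode path are copied from this log, and error handling and session lifecycle are omitted:

    import java.io.IOException;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralWatchSketch {
      public static void main(String[] args) throws IOException, KeeperException, InterruptedException {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59857", 30000, event -> { });
        // One-shot watch: it fires once on the next change and must then be re-registered.
        zk.exists("/hbase/rs/dfce6ce5b10c,36171,1733761907310", event -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            // the owner's session ended; begin expiration handling (RegionServerTracker's job)
          }
        });
      }
    }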
2024-12-09T16:31:49,696 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761908242 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761908242,5,FailOnTimeoutGroup]
2024-12-09T16:31:49,696 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761908242 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761908242,5,FailOnTimeoutGroup]
2024-12-09T16:31:49,697 INFO [M:0;dfce6ce5b10c:41579 {}] hbase.ChoreService(370): Chore service for: master/dfce6ce5b10c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T16:31:49,697 INFO [M:0;dfce6ce5b10c:41579 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:31:49,697 DEBUG [M:0;dfce6ce5b10c:41579 {}] master.HMaster(1795): Stopping service threads
2024-12-09T16:31:49,697 INFO [M:0;dfce6ce5b10c:41579 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T16:31:49,697 INFO [M:0;dfce6ce5b10c:41579 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T16:31:49,697 INFO [M:0;dfce6ce5b10c:41579 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T16:31:49,697 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T16:31:49,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T16:31:49,707 DEBUG [M:0;dfce6ce5b10c:41579 {}] zookeeper.ZKUtil(347): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-09T16:31:49,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:49,708 WARN [M:0;dfce6ce5b10c:41579 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-09T16:31:49,708 INFO [M:0;dfce6ce5b10c:41579 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/.lastflushedseqids
2024-12-09T16:31:49,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741836_1012 (size=99)
2024-12-09T16:31:49,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741836_1012 (size=99)
2024-12-09T16:31:49,723 INFO [M:0;dfce6ce5b10c:41579 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T16:31:49,723 INFO [M:0;dfce6ce5b10c:41579 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T16:31:49,723 DEBUG [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T16:31:49,723 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:31:49,723 DEBUG [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:31:49,723 DEBUG [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T16:31:49,723 DEBUG [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:31:49,724 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-12-09T16:31:49,750 DEBUG [M:0;dfce6ce5b10c:41579 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b17c07f49c64541832ea48620a172e8 is 82, key is hbase:meta,,1/info:regioninfo/1733761909247/Put/seqid=0
2024-12-09T16:31:49,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741837_1013 (size=5672)
2024-12-09T16:31:49,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741837_1013 (size=5672)
2024-12-09T16:31:49,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:31:49,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36171-0x100bfc5384c0001, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:31:49,788 INFO [RS:0;dfce6ce5b10c:36171 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:31:49,788 INFO [RS:0;dfce6ce5b10c:36171 {}] regionserver.HRegionServer(1031): Exiting; stopping=dfce6ce5b10c,36171,1733761907310; zookeeper connection closed.
2024-12-09T16:31:49,790 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1493f17e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1493f17e
2024-12-09T16:31:49,790 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-09T16:31:50,172 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b17c07f49c64541832ea48620a172e8
2024-12-09T16:31:50,197 DEBUG [M:0;dfce6ce5b10c:41579 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8db3b8c615dd4d98b82d7de139bef7fd is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733761909334/Put/seqid=0
2024-12-09T16:31:50,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741838_1014 (size=5275)
2024-12-09T16:31:50,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741838_1014 (size=5275)
2024-12-09T16:31:50,214 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8db3b8c615dd4d98b82d7de139bef7fd
2024-12-09T16:31:50,242 DEBUG [M:0;dfce6ce5b10c:41579 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2f6bc0a6a13b41369704f97d73d18eb0 is 69, key is dfce6ce5b10c,36171,1733761907310/rs:state/1733761908350/Put/seqid=0
2024-12-09T16:31:50,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741839_1015 (size=5156)
2024-12-09T16:31:50,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741839_1015 (size=5156)
2024-12-09T16:31:50,249 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2f6bc0a6a13b41369704f97d73d18eb0
2024-12-09T16:31:50,273 DEBUG [M:0;dfce6ce5b10c:41579 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/920b248deebc4c19b4a69597e5ad61bd is 52, key is load_balancer_on/state:d/1733761909420/Put/seqid=0
2024-12-09T16:31:50,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741840_1016 (size=5056)
2024-12-09T16:31:50,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741840_1016 (size=5056)
2024-12-09T16:31:50,288 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/920b248deebc4c19b4a69597e5ad61bd
2024-12-09T16:31:50,298 DEBUG [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1b17c07f49c64541832ea48620a172e8 as hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1b17c07f49c64541832ea48620a172e8
2024-12-09T16:31:50,308 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1b17c07f49c64541832ea48620a172e8, entries=8, sequenceid=29, filesize=5.5 K
2024-12-09T16:31:50,310 DEBUG [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8db3b8c615dd4d98b82d7de139bef7fd as hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8db3b8c615dd4d98b82d7de139bef7fd
2024-12-09T16:31:50,319 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8db3b8c615dd4d98b82d7de139bef7fd, entries=3, sequenceid=29, filesize=5.2 K
2024-12-09T16:31:50,322 DEBUG [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2f6bc0a6a13b41369704f97d73d18eb0 as hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2f6bc0a6a13b41369704f97d73d18eb0
2024-12-09T16:31:50,331 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2f6bc0a6a13b41369704f97d73d18eb0, entries=1, sequenceid=29, filesize=5.0 K
2024-12-09T16:31:50,333 DEBUG [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/920b248deebc4c19b4a69597e5ad61bd as hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/920b248deebc4c19b4a69597e5ad61bd
2024-12-09T16:31:50,344 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34925/user/jenkins/test-data/a4dc4d35-a7a6-6381-f591-0784895c8301/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/920b248deebc4c19b4a69597e5ad61bd, entries=1, sequenceid=29, filesize=4.9 K
2024-12-09T16:31:50,346 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 622ms, sequenceid=29, compaction requested=false
2024-12-09T16:31:50,352 INFO [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:31:50,352 DEBUG [M:0;dfce6ce5b10c:41579 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733761909723Disabling compacts and flushes for region at 1733761909723Disabling writes for close at 1733761909723Obtaining lock to block concurrent updates at 1733761909724 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733761909724Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733761909724Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733761909726 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733761909726Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733761909750 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733761909750Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733761910179 (+429 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733761910196 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733761910196Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733761910222 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733761910242 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733761910242Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733761910256 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733761910273 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733761910273Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e0f22c4: reopening flushed file at 1733761910297 (+24 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1aad43e9: reopening flushed file at 1733761910308 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24b7d931: reopening flushed file at 1733761910320 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77749afe: reopening flushed file at 1733761910331 (+11 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 622ms, sequenceid=29, compaction requested=false at 1733761910346 (+15 ms)Writing region close event to WAL at 1733761910351 (+5 ms)Closed at 1733761910351
2024-12-09T16:31:50,354 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:50,358 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:50,358 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:50,358 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:50,358 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:31:50,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42131 is added to blk_1073741830_1006 (size=10311)
2024-12-09T16:31:50,363 INFO [M:0;dfce6ce5b10c:41579 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T16:31:50,363 INFO [M:0;dfce6ce5b10c:41579 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41579
2024-12-09T16:31:50,364 INFO [M:0;dfce6ce5b10c:41579 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:31:50,364 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:31:50,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37945 is added to blk_1073741830_1006 (size=10311)
2024-12-09T16:31:50,420 INFO [regionserver/dfce6ce5b10c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:31:50,496 INFO [M:0;dfce6ce5b10c:41579 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:31:50,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:31:50,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41579-0x100bfc5384c0000, quorum=127.0.0.1:59857, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:31:50,502 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ce6afee{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:31:50,502 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@62601d9d{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:31:50,503 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:31:50,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ab4268e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:31:50,503 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ea4d062{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/hadoop.log.dir/,STOPPED}
2024-12-09T16:31:50,507 WARN [BP-482272738-172.17.0.2-1733761904717 heartbeating to localhost/127.0.0.1:34925 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:31:50,507 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:31:50,507 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:31:50,507 WARN [BP-482272738-172.17.0.2-1733761904717 heartbeating to localhost/127.0.0.1:34925 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-482272738-172.17.0.2-1733761904717 (Datanode Uuid 10892b35-845d-4bab-a16f-a7f31d57d5f8) service to localhost/127.0.0.1:34925
2024-12-09T16:31:50,508 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463/data/data3/current/BP-482272738-172.17.0.2-1733761904717 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:31:50,508 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463/data/data4/current/BP-482272738-172.17.0.2-1733761904717 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:31:50,508 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:31:50,511 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d914c2b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:31:50,512 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75aebd6a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:31:50,512 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:31:50,512 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ba5815a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:31:50,512 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b2eee9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/hadoop.log.dir/,STOPPED}
2024-12-09T16:31:50,514 WARN [BP-482272738-172.17.0.2-1733761904717 heartbeating to localhost/127.0.0.1:34925 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:31:50,514 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:31:50,514 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T16:31:50,514 WARN [BP-482272738-172.17.0.2-1733761904717 heartbeating to localhost/127.0.0.1:34925 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-482272738-172.17.0.2-1733761904717 (Datanode Uuid d09c98ff-7a5a-4ba8-8b65-1fc43172de24) service to localhost/127.0.0.1:34925 2024-12-09T16:31:50,515 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463/data/data1/current/BP-482272738-172.17.0.2-1733761904717 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:31:50,515 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/cluster_1359a182-aff4-32a0-dc2f-6f92bccf3463/data/data2/current/BP-482272738-172.17.0.2-1733761904717 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:31:50,516 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T16:31:50,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@329eafc0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T16:31:50,529 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@f407acd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T16:31:50,529 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T16:31:50,529 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@53521f59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T16:31:50,529 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e09da7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/hadoop.log.dir/,STOPPED} 2024-12-09T16:31:50,537 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T16:31:50,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T16:31:50,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T16:31:50,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/hadoop.log.dir so I do NOT create it in target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d 2024-12-09T16:31:50,562 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/bce7a272-7929-a8f5-4ca9-53e10e534027/hadoop.tmp.dir so I do NOT create it in target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d
2024-12-09T16:31:50,562 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855, deleteOnExit=true
2024-12-09T16:31:50,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-09T16:31:50,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/test.cache.data in system properties and HBase conf
2024-12-09T16:31:50,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.tmp.dir in system properties and HBase conf
2024-12-09T16:31:50,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir in system properties and HBase conf
2024-12-09T16:31:50,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-09T16:31:50,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-09T16:31:50,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-09T16:31:50,563 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-09T16:31:50,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-09T16:31:50,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-09T16:31:50,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-09T16:31:50,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T16:31:50,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-09T16:31:50,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-09T16:31:50,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T16:31:50,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T16:31:50,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-09T16:31:50,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/nfs.dump.dir in system properties and HBase conf
2024-12-09T16:31:50,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/java.io.tmpdir in system properties and HBase conf
2024-12-09T16:31:50,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T16:31:50,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-09T16:31:50,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-09T16:31:50,583 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T16:31:50,867 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:31:50,874 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:31:50,878 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:31:50,878 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:31:50,878 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
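The hbase.HBaseTestingUtil(751) records above show the test harness redirecting every Hadoop, YARN and HDFS scratch path to a per-test directory under target/test-data before bringing DFS up. A minimal sketch of driving the same harness, assuming the branch-3 HBaseTestingUtil keeps the familiar HBaseTestingUtility surface (startMiniDFSCluster/shutdownMiniDFSCluster); the class name and datanode count below are illustrative:

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniDfsHarnessSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // startMiniDFSCluster() performs the property rewrites logged above
    // (test.cache.data, hadoop.tmp.dir, the yarn.* and dfs.* paths, ...)
    // and then boots a NameNode plus the requested DataNodes.
    util.startMiniDFSCluster(2); // two datanodes, as in this run
    System.out.println(util.getConfiguration().get("hadoop.tmp.dir"));
    util.shutdownMiniDFSCluster();
  }
}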
2024-12-09T16:31:50,879 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:31:50,880 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@223874d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:31:50,881 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e477d02{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:31:50,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54ddfc85{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/java.io.tmpdir/jetty-localhost-40351-hadoop-hdfs-3_4_1-tests_jar-_-any-11669264454903978196/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T16:31:50,987 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7cc16daa{HTTP/1.1, (http/1.1)}{localhost:40351}
2024-12-09T16:31:50,987 INFO [Time-limited test {}] server.Server(415): Started @110152ms
2024-12-09T16:31:51,007 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T16:31:51,254 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:31:51,260 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:31:51,262 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:31:51,262 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:31:51,262 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T16:31:51,264 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7883fae9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:31:51,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@646dceab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:31:51,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1072886b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/java.io.tmpdir/jetty-localhost-45553-hadoop-hdfs-3_4_1-tests_jar-_-any-15205272329163781102/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:31:51,375 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2804b66{HTTP/1.1, (http/1.1)}{localhost:45553}
2024-12-09T16:31:51,375 INFO [Time-limited test {}] server.Server(415): Started @110541ms
2024-12-09T16:31:51,377 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T16:31:51,444 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:31:51,449 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:31:51,450 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:31:51,450 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:31:51,450 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T16:31:51,451 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b405f91{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:31:51,452 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@766b5ae3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:31:51,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1be4b360{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/java.io.tmpdir/jetty-localhost-43471-hadoop-hdfs-3_4_1-tests_jar-_-any-5536806307206020829/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:31:51,552 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1f7847d8{HTTP/1.1, (http/1.1)}{localhost:43471}
2024-12-09T16:31:51,552 INFO [Time-limited test {}] server.Server(415): Started @110718ms
2024-12-09T16:31:51,554 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T16:31:52,220 WARN [Thread-665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data1/current/BP-2100637919-172.17.0.2-1733761910595/current, will proceed with Du for space computation calculation,
2024-12-09T16:31:52,222 WARN [Thread-666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data2/current/BP-2100637919-172.17.0.2-1733761910595/current, will proceed with Du for space computation calculation,
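The three Jetty instances above are the web endpoints of the mini HDFS cluster: the NameNode webapp on localhost:40351 and one DataNode webapp each on 45553 and 43471. Underneath this is Hadoop's own test cluster; a self-contained sketch using MiniDFSCluster directly, with the path and datanode count chosen to mirror this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two datanodes, matching the two datanode web contexts in the log.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = cluster.getFileSystem();
    fs.mkdirs(new Path("/user/jenkins")); // path is illustrative
    System.out.println("NameNode at " + fs.getUri());
    cluster.shutdown();
  }
}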
2024-12-09T16:31:52,261 WARN [Thread-629 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:31:52,266 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x497a8add7bf3e7f1 with lease ID 0x2522342f5d96fe5: Processing first storage report for DS-537b82e7-990e-4f2a-b129-a6cbd341b343 from datanode DatanodeRegistration(127.0.0.1:40975, datanodeUuid=690754e7-26ac-4861-acf0-3f82009d1f3e, infoPort=46077, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595)
2024-12-09T16:31:52,266 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x497a8add7bf3e7f1 with lease ID 0x2522342f5d96fe5: from storage DS-537b82e7-990e-4f2a-b129-a6cbd341b343 node DatanodeRegistration(127.0.0.1:40975, datanodeUuid=690754e7-26ac-4861-acf0-3f82009d1f3e, infoPort=46077, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T16:31:52,266 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x497a8add7bf3e7f1 with lease ID 0x2522342f5d96fe5: Processing first storage report for DS-3e03c21b-2583-486c-95e8-fabd8b0e5640 from datanode DatanodeRegistration(127.0.0.1:40975, datanodeUuid=690754e7-26ac-4861-acf0-3f82009d1f3e, infoPort=46077, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595)
2024-12-09T16:31:52,266 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x497a8add7bf3e7f1 with lease ID 0x2522342f5d96fe5: from storage DS-3e03c21b-2583-486c-95e8-fabd8b0e5640 node DatanodeRegistration(127.0.0.1:40975, datanodeUuid=690754e7-26ac-4861-acf0-3f82009d1f3e, infoPort=46077, infoSecurePort=0, ipcPort=33373, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:31:52,325 WARN [Thread-676 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data3/current/BP-2100637919-172.17.0.2-1733761910595/current, will proceed with Du for space computation calculation,
2024-12-09T16:31:52,330 WARN [Thread-677 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data4/current/BP-2100637919-172.17.0.2-1733761910595/current, will proceed with Du for space computation calculation,
2024-12-09T16:31:52,362 WARN [Thread-652 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:31:52,367 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc4309000569754e with lease ID 0x2522342f5d96fe6: Processing first storage report for DS-84167a42-ec4b-4e4c-a636-257392bdeaa0 from datanode DatanodeRegistration(127.0.0.1:43671, datanodeUuid=07f6efee-23eb-4e00-b421-21c4606f42e4, infoPort=41001, infoSecurePort=0, ipcPort=37123, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595)
2024-12-09T16:31:52,367 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4309000569754e with lease ID 0x2522342f5d96fe6: from storage DS-84167a42-ec4b-4e4c-a636-257392bdeaa0 node DatanodeRegistration(127.0.0.1:43671, datanodeUuid=07f6efee-23eb-4e00-b421-21c4606f42e4, infoPort=41001, infoSecurePort=0, ipcPort=37123, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:31:52,367 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc4309000569754e with lease ID 0x2522342f5d96fe6: Processing first storage report for DS-8ed61516-bb4e-45b8-af33-413bf2e5281e from datanode DatanodeRegistration(127.0.0.1:43671, datanodeUuid=07f6efee-23eb-4e00-b421-21c4606f42e4, infoPort=41001, infoSecurePort=0, ipcPort=37123, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595)
2024-12-09T16:31:52,367 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4309000569754e with lease ID 0x2522342f5d96fe6: from storage DS-8ed61516-bb4e-45b8-af33-413bf2e5281e node DatanodeRegistration(127.0.0.1:43671, datanodeUuid=07f6efee-23eb-4e00-b421-21c4606f42e4, infoPort=41001, infoSecurePort=0, ipcPort=37123, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:31:52,409 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d
2024-12-09T16:31:52,418 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/zookeeper_0, clientPort=62591, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T16:31:52,420 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62591
2024-12-09T16:31:52,420 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
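MiniZooKeeperCluster(261/286) above reports a single-node ZooKeeper started on an ephemeral client port (62591 in this run) with its data under the cluster_... directory. A minimal sketch, assuming the usual MiniZooKeeperCluster API where startup(File) picks and returns a free client port; the base directory below is illustrative:

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;

public class MiniZkSketch {
  public static void main(String[] args) throws Exception {
    MiniZooKeeperCluster zk = new MiniZooKeeperCluster(new Configuration());
    // startup() chooses a free port, like clientPort=62591 above.
    int clientPort = zk.startup(new File("/tmp/zk-test")); // directory is illustrative
    System.out.println("ZK quorum at 127.0.0.1:" + clientPort);
    zk.shutdown();
  }
}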
2024-12-09T16:31:52,422 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:31:52,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741825_1001 (size=7)
2024-12-09T16:31:52,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43671 is added to blk_1073741825_1001 (size=7)
2024-12-09T16:31:52,456 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52 with version=8
2024-12-09T16:31:52,456 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/hbase-staging
2024-12-09T16:31:52,458 INFO [Time-limited test {}] client.ConnectionUtils(128): master/dfce6ce5b10c:0 server-side Connection retries=45
2024-12-09T16:31:52,458 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:31:52,458 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T16:31:52,458 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T16:31:52,459 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:31:52,459 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T16:31:52,459 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-09T16:31:52,459 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T16:31:52,462 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39507
2024-12-09T16:31:52,465 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39507 connecting to ZooKeeper ensemble=127.0.0.1:62591
2024-12-09T16:31:52,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:395070x0, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T16:31:52,525 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39507-0x100bfc54d170000 connected
2024-12-09T16:31:52,614 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:31:52,619 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:31:52,624 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:31:52,624 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52, hbase.cluster.distributed=false
2024-12-09T16:31:52,628 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T16:31:52,638 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39507
2024-12-09T16:31:52,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39507
2024-12-09T16:31:52,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39507
2024-12-09T16:31:52,642 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39507
2024-12-09T16:31:52,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39507
2024-12-09T16:31:52,670 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/dfce6ce5b10c:0 server-side Connection retries=45
2024-12-09T16:31:52,670 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:31:52,670 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T16:31:52,670 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T16:31:52,670 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:31:52,671 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T16:31:52,671 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-09T16:31:52,671 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T16:31:52,672 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41215
2024-12-09T16:31:52,674 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41215 connecting to ZooKeeper ensemble=127.0.0.1:62591
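The ipc.RpcExecutor(188/290) records show the master (port 39507) and the regionserver (41215) each building their call queues: a default FIFO queue, a priority read/write pair, a replication queue and a meta-priority queue, each sized from a handler count. A hedged sketch of the standard configuration keys that feed these numbers; whether this particular test overrides them is not visible in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcQueueConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Handler count behind "handlerCount=3" in the RpcExecutor lines
    // (assumption: this run lowered it from the default of 30).
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Read/write split behind "writeQueues=1 ... readQueues=1" (RWQueueRpcExecutor).
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
    System.out.println(conf.getInt("hbase.regionserver.handler.count", 30));
  }
}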
2024-12-09T16:31:52,675 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:31:52,679 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:31:52,698 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412150x0, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T16:31:52,699 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:412150x0, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:31:52,700 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-09T16:31:52,700 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41215-0x100bfc54d170001 connected
2024-12-09T16:31:52,707 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-09T16:31:52,709 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-09T16:31:52,711 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T16:31:52,718 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41215
2024-12-09T16:31:52,723 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41215
2024-12-09T16:31:52,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41215
2024-12-09T16:31:52,731 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41215
2024-12-09T16:31:52,731 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41215
2024-12-09T16:31:52,752 DEBUG [M:0;dfce6ce5b10c:39507 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;dfce6ce5b10c:39507
2024-12-09T16:31:52,753 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/dfce6ce5b10c,39507,1733761912458
2024-12-09T16:31:52,764 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:31:52,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:31:52,767 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/dfce6ce5b10c,39507,1733761912458
2024-12-09T16:31:52,780 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:52,780 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-09T16:31:52,780 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:52,781 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-09T16:31:52,782 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/dfce6ce5b10c,39507,1733761912458 from backup master directory
2024-12-09T16:31:52,789 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-09T16:31:52,789 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=dfce6ce5b10c,39507,1733761912458
2024-12-09T16:31:52,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/dfce6ce5b10c,39507,1733761912458
2024-12-09T16:31:52,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:31:52,790 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:31:52,814 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/hbase.id] with ID: f4b5ac02-3fd0-4525-b4c0-66a613670a50
2024-12-09T16:31:52,814 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/.tmp/hbase.id
2024-12-09T16:31:52,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43671 is added to blk_1073741826_1002 (size=42)
2024-12-09T16:31:52,878 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/.tmp/hbase.id]:[hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/hbase.id]
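The ActiveMasterManager sequence above is the election dance: the master first registers under /hbase/backup-masters, races to create /hbase/master, and on success deletes its backup entry. A sketch that inspects both znodes with a plain ZooKeeper client, assuming the quorum address and paths from the log; note the /hbase/master payload is a protobuf-serialized ServerName, printed raw here:

import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class MasterZnodeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum from the log; a watcher is required but unused here.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62591", 30000, event -> { });
    // Raw bytes: HBase stores a protobuf ServerName here, not plain text.
    byte[] active = zk.getData("/hbase/master", false, null);
    List<String> backups = zk.getChildren("/hbase/backup-masters", false);
    System.out.println("active(raw)=" + new String(active) + " backups=" + backups);
    zk.close();
  }
}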
2024-12-09T16:31:52,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741826_1002 (size=42)
2024-12-09T16:31:52,966 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:31:52,966 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-09T16:31:52,969 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms.
2024-12-09T16:31:52,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:52,981 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:53,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43671 is added to blk_1073741827_1003 (size=196)
2024-12-09T16:31:53,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741827_1003 (size=196)
2024-12-09T16:31:53,442 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T16:31:53,444 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-09T16:31:53,446 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:31:53,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43671 is added to blk_1073741828_1004 (size=1189)
2024-12-09T16:31:53,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741828_1004 (size=1189)
2024-12-09T16:31:53,543 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store
2024-12-09T16:31:53,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741829_1005 (size=34)
2024-12-09T16:31:53,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43671 is added to blk_1073741829_1005 (size=34)
2024-12-09T16:31:53,631 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:31:53,631 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T16:31:53,631 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:31:53,631 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:31:53,631 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T16:31:53,631 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
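The 'master:store' descriptor logged by MasterRegion(370) and HRegion(7590) above has the same shape the public client API produces. A sketch rebuilding just the 'info' family from the logged attributes with TableDescriptorBuilder; the other three families differ only in the values shown in the log:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the logged 'info' family: VERSIONS => '3', DATA_BLOCK_ENCODING =>
    // 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', BLOCKSIZE => 8KB.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build())
        .build();
    System.out.println(td);
  }
}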
2024-12-09T16:31:53,631 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:31:53,631 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733761913631Disabling compacts and flushes for region at 1733761913631Disabling writes for close at 1733761913631Writing region close event to WAL at 1733761913631Closed at 1733761913631
2024-12-09T16:31:53,649 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/.initializing
2024-12-09T16:31:53,649 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458
2024-12-09T16:31:53,657 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C39507%2C1733761912458, suffix=, logDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458, archiveDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/oldWALs, maxLogs=10
2024-12-09T16:31:53,658 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:31:53,683 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:31:53,692 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41001:41001),(127.0.0.1/127.0.0.1:46077:46077)]
2024-12-09T16:31:53,696 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-09T16:31:53,696 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:31:53,697 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:31:53,697 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:31:53,702 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:31:53,706 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-09T16:31:53,706 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:53,707 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:53,707 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:31:53,710 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-09T16:31:53,710 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:53,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T16:31:53,712 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
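Each regionserver.HStore(400) record prints the effective CacheConfig for one column family. Those flags map onto standard hbase-site keys; a sketch naming them, with the values the log reports as in effect (the key-to-flag mapping reflects my reading of CacheConfig and should be treated as an assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.rs.cacheblocksonwrite", false);      // cacheDataOnWrite
    conf.setBoolean("hfile.block.index.cacheonwrite", false);   // cacheIndexesOnWrite
    conf.setBoolean("hfile.block.bloom.cacheonwrite", false);   // cacheBloomsOnWrite
    conf.setBoolean("hbase.rs.evictblocksonclose", false);      // cacheEvictOnClose
    conf.setBoolean("hbase.block.data.cachecompressed", false); // cacheDataCompressed
    conf.setBoolean("hbase.rs.prefetchblocksonopen", false);    // prefetchOnOpen
  }
}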
2024-12-09T16:31:53,714 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-09T16:31:53,714 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:53,715 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T16:31:53,716 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:31:53,718 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-09T16:31:53,718 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:53,723 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T16:31:53,723 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:31:53,725 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:31:53,726 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:31:53,732 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:31:53,732 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:31:53,734 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-09T16:31:53,738 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:31:53,772 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T16:31:53,778 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856177, jitterRate=0.08868592977523804}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-09T16:31:53,779 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733761913697Initializing all the Stores at 1733761913699 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761913699Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761913702 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761913702Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761913702Cleaning up temporary data from old regions at 1733761913732 (+30 ms)Region opened successfully at 1733761913779 (+47 ms)
2024-12-09T16:31:53,779 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
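The compactions.CompactionConfiguration(183) records above dump the per-family compaction tuning. Its headline numbers correspond to well-known hbase-site keys; a sketch, with values copied from the log (whether this run set them explicitly or inherited defaults is not shown):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "[minFilesToCompact:3, maxFilesToCompact:10)" and "ratio 1.200000":
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // "minCompactSize:128 MB":
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
  }
}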
2024-12-09T16:31:53,794 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c9929a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0
2024-12-09T16:31:53,798 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-12-09T16:31:53,799 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-09T16:31:53,799 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-09T16:31:53,799 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-09T16:31:53,800 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec
2024-12-09T16:31:53,801 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-12-09T16:31:53,801 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-09T16:31:53,815 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-09T16:31:53,816 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-09T16:31:53,855 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-12-09T16:31:53,856 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-09T16:31:53,859 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-09T16:31:53,866 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-12-09T16:31:53,867 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-09T16:31:53,868 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-09T16:31:53,873 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-12-09T16:31:53,875 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-09T16:31:53,880 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-12-09T16:31:53,884 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-09T16:31:53,889 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-09T16:31:53,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-09T16:31:53,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:53,899 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-09T16:31:53,899 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:53,900 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=dfce6ce5b10c,39507,1733761912458, sessionid=0x100bfc54d170000, setting cluster-up flag (Was=false)
2024-12-09T16:31:53,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:53,914 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:53,939 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-09T16:31:53,941 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,39507,1733761912458
2024-12-09T16:31:53,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:53,966 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:53,989 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-09T16:31:53,991 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,39507,1733761912458
2024-12-09T16:31:54,004 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-12-09T16:31:54,012 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-12-09T16:31:54,013 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-12-09T16:31:54,013 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-09T16:31:54,013 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: dfce6ce5b10c,39507,1733761912458 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-09T16:31:54,015 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5
2024-12-09T16:31:54,015 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5
2024-12-09T16:31:54,015 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5
2024-12-09T16:31:54,015 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5
2024-12-09T16:31:54,016 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/dfce6ce5b10c:0, corePoolSize=10, maxPoolSize=10
2024-12-09T16:31:54,016 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,016 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2
[master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:54,030 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733761944030 2024-12-09T16:31:54,030 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T16:31:54,030 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T16:31:54,031 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T16:31:54,031 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T16:31:54,031 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T16:31:54,031 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T16:31:54,031 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:31:54,031 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T16:31:54,035 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:31:54,036 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T16:31:54,049 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:54,050 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T16:31:54,050 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T16:31:54,050 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T16:31:54,053 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T16:31:54,053 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T16:31:54,058 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761914053,5,FailOnTimeoutGroup] 2024-12-09T16:31:54,058 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(746): ClusterId : f4b5ac02-3fd0-4525-b4c0-66a613670a50 2024-12-09T16:31:54,058 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T16:31:54,061 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761914058,5,FailOnTimeoutGroup] 2024-12-09T16:31:54,061 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:54,061 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T16:31:54,061 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:54,062 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
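[Editor's note on the FSTableDescriptors(156) entry above: it records the full hbase:meta schema that InitMetaProcedure writes to the filesystem. As a rough illustration only — the builder calls below are the standard HBase client API, not code this test invokes directly, and the table name is invented — an equivalent family such as 'info' (VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks) could be declared like this:]

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      // Mirrors the attributes logged for the 'info' family of hbase:meta.
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example")) // hypothetical table
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBlocksize(8192)                                  // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .build();
      }
    }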
2024-12-09T16:31:54,065 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-09T16:31:54,065 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-12-09T16:31:54,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43671 is added to blk_1073741831_1007 (size=1321)
2024-12-09T16:31:54,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741831_1007 (size=1321)
2024-12-09T16:31:54,075 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-12-09T16:31:54,076 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52
2024-12-09T16:31:54,087 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-09T16:31:54,088 DEBUG [RS:0;dfce6ce5b10c:41215 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ca9c347, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0
2024-12-09T16:31:54,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43671 is added to blk_1073741832_1008 (size=32)
2024-12-09T16:31:54,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741832_1008 (size=32)
2024-12-09T16:31:54,117 DEBUG [RS:0;dfce6ce5b10c:41215 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;dfce6ce5b10c:41215
2024-12-09T16:31:54,117 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-12-09T16:31:54,117 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-12-09T16:31:54,117 DEBUG [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(832): About to register with Master.
2024-12-09T16:31:54,118 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(2659): reportForDuty to master=dfce6ce5b10c,39507,1733761912458 with port=41215, startcode=1733761912669
2024-12-09T16:31:54,118 DEBUG [RS:0;dfce6ce5b10c:41215 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-09T16:31:54,123 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38893, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-12-09T16:31:54,124 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39507 {}] master.ServerManager(363): Checking decommissioned status of RegionServer dfce6ce5b10c,41215,1733761912669
2024-12-09T16:31:54,124 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39507 {}] master.ServerManager(517): Registering regionserver=dfce6ce5b10c,41215,1733761912669
2024-12-09T16:31:54,127 DEBUG [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52
2024-12-09T16:31:54,127 DEBUG [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38821
2024-12-09T16:31:54,128 DEBUG [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-12-09T16:31:54,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T16:31:54,140 DEBUG [RS:0;dfce6ce5b10c:41215 {}] zookeeper.ZKUtil(111): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/dfce6ce5b10c,41215,1733761912669
2024-12-09T16:31:54,140 WARN [RS:0;dfce6ce5b10c:41215 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
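[Editor's note on the ZKUtil(111) entry above: the regionserver registers itself under /hbase/rs as an ephemeral znode, which the master's RegionServerTracker watches. A minimal sketch of inspecting that registration with the plain ZooKeeper client — the quorum address and paths are taken from this log; everything else is illustrative:]

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServersSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62591", 30000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // Each child is an ephemeral znode named host,port,startcode,
        // e.g. dfce6ce5b10c,41215,1733761912669 as seen in this log.
        List<String> servers = zk.getChildren("/hbase/rs", false);
        servers.forEach(System.out::println);
        zk.close();
      }
    }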
2024-12-09T16:31:54,140 INFO [RS:0;dfce6ce5b10c:41215 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:31:54,141 DEBUG [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669
2024-12-09T16:31:54,143 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [dfce6ce5b10c,41215,1733761912669]
2024-12-09T16:31:54,166 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-09T16:31:54,246 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-09T16:31:54,255 INFO [RS:0;dfce6ce5b10c:41215 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-09T16:31:54,255 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:54,258 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-12-09T16:31:54,260 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-12-09T16:31:54,260 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:54,260 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,261 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,261 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,261 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,261 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,261 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2
2024-12-09T16:31:54,261 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,261 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,261 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,262 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,262 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,262 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:31:54,262 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3
2024-12-09T16:31:54,262 DEBUG [RS:0;dfce6ce5b10c:41215 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3
2024-12-09T16:31:54,274 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:54,274 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:54,274 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:54,274 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:54,274 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:54,275 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,41215,1733761912669-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-09T16:31:54,305 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-09T16:31:54,306 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,41215,1733761912669-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:54,306 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:31:54,306 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.Replication(171): dfce6ce5b10c,41215,1733761912669 started
2024-12-09T16:31:54,326 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
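[Editor's note on the many "Chore ScheduledChore name=... is enabled." entries above: they come from ChoreService scheduling periodic background tasks. A hedged sketch of what such a chore looks like — ScheduledChore and ChoreService are internal HBase classes, and the chore below is invented for illustration:]

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // The period is in milliseconds, matching entries like period=60000 above.
        ScheduledChore demo = new ScheduledChore("demoChore", stopper, 60000) {
          @Override protected void chore() {
            System.out.println("periodic work runs here");
          }
        };
        new ChoreService("demo").scheduleChore(demo);
      }
    }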
2024-12-09T16:31:54,326 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(1482): Serving as dfce6ce5b10c,41215,1733761912669, RpcServer on dfce6ce5b10c/172.17.0.2:41215, sessionid=0x100bfc54d170001
2024-12-09T16:31:54,326 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-09T16:31:54,326 DEBUG [RS:0;dfce6ce5b10c:41215 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager dfce6ce5b10c,41215,1733761912669
2024-12-09T16:31:54,326 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,41215,1733761912669'
2024-12-09T16:31:54,326 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-09T16:31:54,327 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-09T16:31:54,327 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-09T16:31:54,327 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-09T16:31:54,327 DEBUG [RS:0;dfce6ce5b10c:41215 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager dfce6ce5b10c,41215,1733761912669
2024-12-09T16:31:54,327 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,41215,1733761912669'
2024-12-09T16:31:54,328 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-09T16:31:54,328 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-09T16:31:54,328 DEBUG [RS:0;dfce6ce5b10c:41215 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-09T16:31:54,328 INFO [RS:0;dfce6ce5b10c:41215 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-09T16:31:54,328 INFO [RS:0;dfce6ce5b10c:41215 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
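[Editor's note on the two "Quota support disabled" entries above: they mean hbase.quota.enabled is false in this test's configuration. If a test did want RPC and space quotas, flipping that key (a real configuration property, though the snippet itself is only a sketch) is the switch:]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaConfSketch {
      public static Configuration withQuotas() {
        Configuration conf = HBaseConfiguration.create();
        // Enables RegionServerRpcQuotaManager / RegionServerSpaceQuotaManager,
        // which this log shows as "Quota support disabled" when left false.
        conf.setBoolean("hbase.quota.enabled", true);
        return conf;
      }
    }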
2024-12-09T16:31:54,434 INFO [RS:0;dfce6ce5b10c:41215 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C41215%2C1733761912669, suffix=, logDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669, archiveDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/oldWALs, maxLogs=32
2024-12-09T16:31:54,436 INFO [RS:0;dfce6ce5b10c:41215 {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C41215%2C1733761912669.1733761914435
2024-12-09T16:31:54,507 INFO [RS:0;dfce6ce5b10c:41215 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435
2024-12-09T16:31:54,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:31:54,542 DEBUG [RS:0;dfce6ce5b10c:41215 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46077:46077),(127.0.0.1/127.0.0.1:41001:41001)]
2024-12-09T16:31:54,554 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-09T16:31:54,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-09T16:31:54,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:54,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:54,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-09T16:31:54,582 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-09T16:31:54,582 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:54,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:54,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-09T16:31:54,600 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-09T16:31:54,600 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:54,601 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:54,602 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-09T16:31:54,607 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-09T16:31:54,608 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:54,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:54,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-09T16:31:54,610 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740
2024-12-09T16:31:54,610 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740
2024-12-09T16:31:54,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-09T16:31:54,612 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-09T16:31:54,613 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-09T16:31:54,617 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-09T16:31:54,637 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T16:31:54,638 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=719123, jitterRate=-0.08558839559555054}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-09T16:31:54,639 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733761914514Initializing all the Stores at 1733761914515 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761914515Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761914550 (+35 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761914550Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761914550Cleaning up temporary data from old regions at 1733761914612 (+62 ms)Region opened successfully at 1733761914639 (+27 ms)
2024-12-09T16:31:54,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T16:31:54,640 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T16:31:54,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T16:31:54,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T16:31:54,640 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T16:31:54,642 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T16:31:54,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733761914639Disabling compacts and flushes for region at 1733761914639Disabling writes for close at 1733761914640 (+1 ms)Writing region close event to WAL at 1733761914642 (+2 ms)Closed at 1733761914642
2024-12-09T16:31:54,645 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T16:31:54,645 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-12-09T16:31:54,646 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-09T16:31:54,658 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T16:31:54,666 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-09T16:31:54,734 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T16:31:54,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:31:54,772 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:31:54,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:31:54,777 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:31:54,816 DEBUG [dfce6ce5b10c:39507 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-09T16:31:54,817 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=dfce6ce5b10c,41215,1733761912669
2024-12-09T16:31:54,831 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,41215,1733761912669, state=OPENING
2024-12-09T16:31:54,875 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-09T16:31:54,890 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:54,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:31:54,898 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:31:54,906 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T16:31:54,907 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,41215,1733761912669}]
2024-12-09T16:31:54,910 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:31:55,073 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-09T16:31:55,086 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40243, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-09T16:31:55,095 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-09T16:31:55,096 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:31:55,109 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C41215%2C1733761912669.meta, suffix=.meta, logDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669, archiveDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/oldWALs, maxLogs=32
2024-12-09T16:31:55,111 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:31:55,179 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:31:55,241 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41001:41001),(127.0.0.1/127.0.0.1:46077:46077)]
2024-12-09T16:31:55,274 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-09T16:31:55,275 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-09T16:31:55,275 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-09T16:31:55,275 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
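[Editor's note on the CompactionConfiguration(183) entries repeated above for each column family: the values minFilesToCompact:3, maxFilesToCompact:10 and ratio 1.200000 map onto standard hbase-site.xml properties. A sketch using real property names but illustrative values; this is not the test's own configuration code:]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact in the log
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact in the log
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // selection ratio in the log
        return conf;
      }
    }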
2024-12-09T16:31:55,275 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-09T16:31:55,276 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:31:55,276 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-12-09T16:31:55,276 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-12-09T16:31:55,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-09T16:31:55,291 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-09T16:31:55,291 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:55,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:55,298 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-09T16:31:55,306 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-09T16:31:55,307 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:55,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:55,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-09T16:31:55,317 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-09T16:31:55,317 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:55,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:55,323 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-09T16:31:55,333 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-09T16:31:55,333 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:31:55,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:31:55,336 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T16:31:55,338 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740 2024-12-09T16:31:55,343 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740 2024-12-09T16:31:55,350 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T16:31:55,350 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T16:31:55,352 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T16:31:55,355 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T16:31:55,358 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868983, jitterRate=0.10496942698955536}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T16:31:55,358 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T16:31:55,360 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733761915276Writing region info on filesystem at 1733761915276Initializing all the Stores at 1733761915278 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761915278Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761915282 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761915282Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761915282Cleaning up temporary data from old regions at 1733761915350 (+68 ms)Running coprocessor post-open hooks at 1733761915358 (+8 ms)Region opened successfully at 1733761915359 (+1 ms) 2024-12-09T16:31:55,361 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733761915072 2024-12-09T16:31:55,369 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=dfce6ce5b10c,41215,1733761912669 2024-12-09T16:31:55,373 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,41215,1733761912669, state=OPEN 2024-12-09T16:31:55,376 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T16:31:55,377 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T16:31:55,458 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:31:55,458 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:31:55,462 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:31:55,462 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,41215,1733761912669 2024-12-09T16:31:55,462 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:31:55,470 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T16:31:55,470 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,41215,1733761912669 in 556 msec 2024-12-09T16:31:55,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T16:31:55,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 825 msec 2024-12-09T16:31:55,486 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:31:55,487 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T16:31:55,489 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:31:55,489 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,41215,1733761912669, seqNum=-1] 2024-12-09T16:31:55,490 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:31:55,493 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51743, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:31:55,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4970 sec 2024-12-09T16:31:55,512 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733761915512, completionTime=-1 2024-12-09T16:31:55,512 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T16:31:55,512 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T16:31:55,516 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T16:31:55,516 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733761975516 2024-12-09T16:31:55,516 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733762035516 2024-12-09T16:31:55,516 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 4 msec 2024-12-09T16:31:55,517 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39507,1733761912458-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,517 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39507,1733761912458-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,517 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39507,1733761912458-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,517 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-dfce6ce5b10c:39507, period=300000, unit=MILLISECONDS is enabled. 
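For orientation alongside the InitMetaProcedure entry above ("Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces"): a minimal, hypothetical sketch of the equivalent client-side namespace creation. The quorum address is the one shown in this log; the namespace name is invented, since 'default' and 'hbase' already exist.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum address taken from this log; adjust for a real deployment.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:62591");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Client-side equivalent of the master's namespace-creation step.
      // "demo_ns" is an invented name for illustration only.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
    }
  }
}
```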
2024-12-09T16:31:55,517 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,520 DEBUG [master/dfce6ce5b10c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T16:31:55,522 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,529 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.739sec 2024-12-09T16:31:55,530 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T16:31:55,530 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T16:31:55,530 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T16:31:55,530 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T16:31:55,530 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T16:31:55,530 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39507,1733761912458-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:31:55,530 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39507,1733761912458-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T16:31:55,554 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T16:31:55,554 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T16:31:55,554 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39507,1733761912458-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
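The repeated "Chore ScheduledChore name=..., period=..., unit=... is enabled." entries above come from HBase's ChoreService. A minimal sketch of that pattern follows, with the caveat that ScheduledChore and ChoreService are internal (audience-private) classes, and the chore name and period here are invented:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws Exception {
    // Minimal Stoppable so the chore can be cancelled cooperatively.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo"); // thread-name prefix
    // Invented chore: fires every 1000 ms, the same cadence as the
    // CompactionChecker chore enabled later in this log.
    service.scheduleChore(new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    });
    Thread.sleep(5_000);
    service.shutdown();
  }
}
```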
2024-12-09T16:31:55,558 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69cce1cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:31:55,558 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request dfce6ce5b10c,39507,-1 for getting cluster id 2024-12-09T16:31:55,559 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T16:31:55,578 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f4b5ac02-3fd0-4525-b4c0-66a613670a50' 2024-12-09T16:31:55,582 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T16:31:55,582 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f4b5ac02-3fd0-4525-b4c0-66a613670a50" 2024-12-09T16:31:55,583 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e75f655, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:31:55,583 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [dfce6ce5b10c,39507,-1] 2024-12-09T16:31:55,584 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T16:31:55,595 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:31:55,596 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50002, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T16:31:55,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f5995ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:31:55,602 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:31:55,607 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,41215,1733761912669, seqNum=-1] 2024-12-09T16:31:55,607 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:31:55,612 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37748, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:31:55,616 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=dfce6ce5b10c,39507,1733761912458 2024-12-09T16:31:55,617 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:31:55,622 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T16:31:55,657 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/dfce6ce5b10c:0 server-side Connection retries=45 2024-12-09T16:31:55,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:31:55,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T16:31:55,657 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T16:31:55,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:31:55,658 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T16:31:55,658 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T16:31:55,658 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T16:31:55,666 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34107 2024-12-09T16:31:55,669 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34107 connecting to ZooKeeper ensemble=127.0.0.1:62591 2024-12-09T16:31:55,671 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:31:55,687 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:31:55,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:341070x0, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T16:31:55,716 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:341070x0, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-09T16:31:55,717 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-09T16:31:55,718 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34107-0x100bfc54d170002 connected 2024-12-09T16:31:55,718 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T16:31:55,726 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T16:31:55,727 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:34107-0x100bfc54d170002, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T16:31:55,729 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34107-0x100bfc54d170002, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T16:31:55,739 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34107 2024-12-09T16:31:55,739 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34107 2024-12-09T16:31:55,741 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34107 2024-12-09T16:31:55,742 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34107 2024-12-09T16:31:55,743 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34107 2024-12-09T16:31:55,752 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer(746): ClusterId : f4b5ac02-3fd0-4525-b4c0-66a613670a50 2024-12-09T16:31:55,752 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T16:31:55,772 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T16:31:55,772 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T16:31:55,783 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T16:31:55,784 DEBUG [RS:1;dfce6ce5b10c:34107 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6129d80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0 2024-12-09T16:31:55,806 DEBUG [RS:1;dfce6ce5b10c:34107 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;dfce6ce5b10c:34107 2024-12-09T16:31:55,806 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T16:31:55,806 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T16:31:55,806 DEBUG [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer(832): About to register with Master. 
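The ZKUtil entries above ("Set watcher on existing znode=/hbase/master", "/hbase/acl") and the earlier NodeDataChanged events on /hbase/meta-region-server reflect the standard ZooKeeper watch pattern. A minimal sketch with the plain ZooKeeper client API; the paths and quorum address come from this log, everything else is illustrative:

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event=" + event.getType() + " path=" + event.getPath());
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62591", 30_000, watcher);
    // Registers a one-shot watch; a NodeDataChanged event fires when the
    // meta-location znode is rewritten, as seen earlier in this log.
    zk.exists("/hbase/meta-region-server", true);
    Thread.sleep(60_000); // keep the session alive to receive events
    zk.close();
  }
}
```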
2024-12-09T16:31:55,808 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer(2659): reportForDuty to master=dfce6ce5b10c,39507,1733761912458 with port=34107, startcode=1733761915656 2024-12-09T16:31:55,808 DEBUG [RS:1;dfce6ce5b10c:34107 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T16:31:55,814 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53765, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T16:31:55,815 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39507 {}] master.ServerManager(363): Checking decommissioned status of RegionServer dfce6ce5b10c,34107,1733761915656 2024-12-09T16:31:55,815 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39507 {}] master.ServerManager(517): Registering regionserver=dfce6ce5b10c,34107,1733761915656 2024-12-09T16:31:55,822 DEBUG [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52 2024-12-09T16:31:55,822 DEBUG [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38821 2024-12-09T16:31:55,822 DEBUG [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T16:31:55,840 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T16:31:55,841 DEBUG [RS:1;dfce6ce5b10c:34107 {}] zookeeper.ZKUtil(111): regionserver:34107-0x100bfc54d170002, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/dfce6ce5b10c,34107,1733761915656 2024-12-09T16:31:55,841 WARN [RS:1;dfce6ce5b10c:34107 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T16:31:55,841 INFO [RS:1;dfce6ce5b10c:34107 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T16:31:55,842 DEBUG [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656 2024-12-09T16:31:55,843 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [dfce6ce5b10c,34107,1733761915656] 2024-12-09T16:31:55,867 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T16:31:55,872 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T16:31:55,873 INFO [RS:1;dfce6ce5b10c:34107 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T16:31:55,873 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
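The MemStoreFlusher line above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. Assuming the stock defaults (hbase.regionserver.global.memstore.size = 0.4 of heap, and a low-water mark of 0.95 of that limit), those figures imply roughly a 2200 MB heap for this JVM; the heap value below is that inference, not something stated in the log:

```java
public class MemStoreLimitSketch {
  public static void main(String[] args) {
    long heapMb = 2200;             // implied max heap (assumption, back-solved from 880 M)
    double globalFraction = 0.4;    // assumed default upper fraction of heap
    double lowerFraction = 0.95;    // assumed default low-water fraction of the limit
    long limitMb = (long) (heapMb * globalFraction);   // 880 MB, matching the log
    long lowMarkMb = (long) (limitMb * lowerFraction); // 836 MB, matching the log
    System.out.printf("limit=%d M, lowMark=%d M%n", limitMb, lowMarkMb);
  }
}
```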
2024-12-09T16:31:55,876 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T16:31:55,878 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T16:31:55,878 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,878 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:55,878 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:55,878 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:55,878 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:55,879 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:55,879 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:31:55,879 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:55,879 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:55,879 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:55,879 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:55,879 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:55,879 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:31:55,879 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:31:55,879 DEBUG [RS:1;dfce6ce5b10c:34107 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:31:55,881 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
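The "Starting executor service name=..., corePoolSize=..., maxPoolSize=..." entries above describe per-event-type worker pools on the region server. A generic sketch of the same shape using plain java.util.concurrent (not HBase's actual internal ExecutorService wrapper):

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorSketch {
  public static void main(String[] args) {
    // Shaped like RS_OPEN_REGION above: corePoolSize=1, maxPoolSize=1,
    // with an unbounded FIFO queue of pending events.
    ThreadPoolExecutor openRegionPool =
        new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    openRegionPool.submit(() -> System.out.println("open region task"));
    openRegionPool.shutdown();
  }
}
```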
2024-12-09T16:31:55,881 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,881 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,881 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,881 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,882 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,34107,1733761915656-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:31:55,906 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T16:31:55,907 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,34107,1733761915656-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,907 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,907 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.Replication(171): dfce6ce5b10c,34107,1733761915656 started 2024-12-09T16:31:55,932 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:31:55,932 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer(1482): Serving as dfce6ce5b10c,34107,1733761915656, RpcServer on dfce6ce5b10c/172.17.0.2:34107, sessionid=0x100bfc54d170002 2024-12-09T16:31:55,932 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;dfce6ce5b10c:34107,5,FailOnTimeoutGroup] 2024-12-09T16:31:55,933 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-09T16:31:55,933 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T16:31:55,934 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T16:31:55,934 DEBUG [RS:1;dfce6ce5b10c:34107 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager dfce6ce5b10c,34107,1733761915656 2024-12-09T16:31:55,934 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,34107,1733761915656' 2024-12-09T16:31:55,934 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T16:31:55,935 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T16:31:55,936 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is dfce6ce5b10c,39507,1733761912458 2024-12-09T16:31:55,936 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@125935e0 2024-12-09T16:31:55,936 DEBUG 
[RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T16:31:55,939 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T16:31:55,939 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T16:31:55,939 DEBUG [RS:1;dfce6ce5b10c:34107 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager dfce6ce5b10c,34107,1733761915656 2024-12-09T16:31:55,939 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,34107,1733761915656' 2024-12-09T16:31:55,939 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T16:31:55,942 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50004, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T16:31:55,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39507 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T16:31:55,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39507 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-09T16:31:55,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39507 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T16:31:55,946 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T16:31:55,947 DEBUG [RS:1;dfce6ce5b10c:34107 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T16:31:55,947 INFO [RS:1;dfce6ce5b10c:34107 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T16:31:55,947 INFO [RS:1;dfce6ce5b10c:34107 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
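The HMaster "create 'TestLogRolling-testLogRollOnDatanodeDeath'" entry above, together with the two TableDescriptorChecker warnings, corresponds to a client-side create with deliberately tiny MAX_FILESIZE and MEMSTORE_FLUSHSIZE (a log-rolling test wants frequent flushes and splits). A sketch of that call with the public client API; the values are read off this log, and connection setup is elided:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  static void createTestTable(Admin admin) throws IOException {
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setMaxFileSize(786432)       // warned above as "too small"
        .setMemStoreFlushSize(8192)   // warned above as "too small"
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)        // VERSIONS => '1' in the descriptor above
            .build());
    admin.createTable(builder.build());
  }
}
```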
2024-12-09T16:31:55,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39507 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T16:31:55,962 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T16:31:55,962 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:31:55,963 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39507 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-09T16:31:55,965 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T16:31:55,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39507 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T16:31:56,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43671 is added to blk_1073741835_1011 (size=393) 2024-12-09T16:31:56,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741835_1011 (size=393) 2024-12-09T16:31:56,020 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1240e0f513578afb8fd45101b7305c2b, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52 2024-12-09T16:31:56,050 INFO [RS:1;dfce6ce5b10c:34107 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C34107%2C1733761915656, suffix=, logDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656, archiveDir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/oldWALs, maxLogs=32 2024-12-09T16:31:56,052 INFO [RS:1;dfce6ce5b10c:34107 {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 2024-12-09T16:31:56,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43671 is added to blk_1073741836_1012 (size=76) 2024-12-09T16:31:56,103 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): 
Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:31:56,104 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 1240e0f513578afb8fd45101b7305c2b, disabling compactions & flushes 2024-12-09T16:31:56,104 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. 2024-12-09T16:31:56,104 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. 2024-12-09T16:31:56,104 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. after waiting 0 ms 2024-12-09T16:31:56,104 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. 2024-12-09T16:31:56,104 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. 2024-12-09T16:31:56,104 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1240e0f513578afb8fd45101b7305c2b: Waiting for close lock at 1733761916104Disabling compacts and flushes for region at 1733761916104Disabling writes for close at 1733761916104Writing region close event to WAL at 1733761916104Closed at 1733761916104 2024-12-09T16:31:56,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40975 is added to blk_1073741836_1012 (size=76) 2024-12-09T16:31:56,108 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T16:31:56,109 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733761916109"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733761916109"}]},"ts":"1733761916109"} 2024-12-09T16:31:56,124 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
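The MetaTableAccessor Put above writes info:regioninfo and info:state for the new region into hbase:meta. A minimal sketch of reading the state column back with the client API; the row key format is the one shown in this log (table,startkey,timestamp.encodedRegion.), and connection setup is elided:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaReadSketch {
  // e.g. metaRowKey =
  // "TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b."
  static void printRegionState(Connection conn, String metaRowKey) throws IOException {
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Get get = new Get(Bytes.toBytes(metaRowKey))
          .addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"));
      Result r = meta.get(get);
      System.out.println("state=" + Bytes.toString(
          r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"))));
    }
  }
}
```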
2024-12-09T16:31:56,126 INFO [RS:1;dfce6ce5b10c:34107 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 2024-12-09T16:31:56,127 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T16:31:56,127 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733761916127"}]},"ts":"1733761916127"} 2024-12-09T16:31:56,136 DEBUG [RS:1;dfce6ce5b10c:34107 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41001:41001),(127.0.0.1/127.0.0.1:46077:46077)] 2024-12-09T16:31:56,139 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-09T16:31:56,140 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1240e0f513578afb8fd45101b7305c2b, ASSIGN}] 2024-12-09T16:31:56,143 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1240e0f513578afb8fd45101b7305c2b, ASSIGN 2024-12-09T16:31:56,146 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1240e0f513578afb8fd45101b7305c2b, ASSIGN; state=OFFLINE, location=dfce6ce5b10c,41215,1733761912669; forceNewPlan=false, retain=false 2024-12-09T16:31:56,298 INFO [dfce6ce5b10c:39507 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T16:31:56,299 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1240e0f513578afb8fd45101b7305c2b, regionState=OPENING, regionLocation=dfce6ce5b10c,41215,1733761912669 2024-12-09T16:31:56,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1240e0f513578afb8fd45101b7305c2b, ASSIGN because future has completed 2024-12-09T16:31:56,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1240e0f513578afb8fd45101b7305c2b, server=dfce6ce5b10c,41215,1733761912669}] 2024-12-09T16:31:56,506 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. 
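The WAL paths above URL-encode the commas in the server name as %2C (e.g. dfce6ce5b10c%2C34107%2C1733761915656.1733761916051). A tiny sketch decoding such a file name back into host,port,startcode.timestamp form:

```java
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;

public class WalNameSketch {
  public static void main(String[] args) {
    String wal = "dfce6ce5b10c%2C34107%2C1733761915656.1733761916051"; // from this log
    String decoded = URLDecoder.decode(wal, StandardCharsets.UTF_8);
    // Prints: dfce6ce5b10c,34107,1733761915656.1733761916051
    System.out.println(decoded);
  }
}
```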
2024-12-09T16:31:56,506 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1240e0f513578afb8fd45101b7305c2b, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:31:56,507 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:31:56,507 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:31:56,507 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:31:56,508 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:31:56,524 INFO [StoreOpener-1240e0f513578afb8fd45101b7305c2b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:31:56,535 INFO [StoreOpener-1240e0f513578afb8fd45101b7305c2b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1240e0f513578afb8fd45101b7305c2b columnFamilyName info 2024-12-09T16:31:56,535 DEBUG [StoreOpener-1240e0f513578afb8fd45101b7305c2b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:31:56,544 INFO [StoreOpener-1240e0f513578afb8fd45101b7305c2b-1 {}] regionserver.HStore(327): Store=1240e0f513578afb8fd45101b7305c2b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:31:56,546 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:31:56,554 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:31:56,558 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:31:56,562 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:31:56,562 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:31:56,582 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:31:56,622 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:31:56,626 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1240e0f513578afb8fd45101b7305c2b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838133, jitterRate=0.06574232876300812}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T16:31:56,626 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:31:56,627 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1240e0f513578afb8fd45101b7305c2b: Running coprocessor pre-open hook at 1733761916508Writing region info on filesystem at 1733761916508Initializing all the Stores at 1733761916520 (+12 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761916520Cleaning up temporary data from old regions at 1733761916562 (+42 ms)Running coprocessor post-open hooks at 1733761916626 (+64 ms)Region opened successfully at 1733761916627 (+1 ms) 2024-12-09T16:31:56,636 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b., pid=6, masterSystemTime=1733761916485 2024-12-09T16:31:56,651 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. 2024-12-09T16:31:56,651 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. 2024-12-09T16:31:56,657 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1240e0f513578afb8fd45101b7305c2b, regionState=OPEN, openSeqNum=2, regionLocation=dfce6ce5b10c,41215,1733761912669 2024-12-09T16:31:56,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1240e0f513578afb8fd45101b7305c2b, server=dfce6ce5b10c,41215,1733761912669 because future has completed 2024-12-09T16:31:56,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T16:31:56,714 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1240e0f513578afb8fd45101b7305c2b, server=dfce6ce5b10c,41215,1733761912669 in 370 msec 2024-12-09T16:31:56,769 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T16:31:56,770 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=1240e0f513578afb8fd45101b7305c2b, ASSIGN in 575 msec 2024-12-09T16:31:56,780 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T16:31:56,780 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733761916780"}]},"ts":"1733761916780"} 2024-12-09T16:31:56,789 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-09T16:31:56,795 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T16:31:56,813 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 851 msec 2024-12-09T16:31:59,638 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T16:31:59,638 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T16:31:59,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T16:31:59,639 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): 
Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-09T16:31:59,640 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T16:31:59,640 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T16:32:00,166 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-09T16:32:00,779 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T16:32:00,781 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:32:00,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:32:00,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:32:00,829 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:32:06,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39507 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T16:32:06,012 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-09T16:32:06,012 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-09T16:32:06,016 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T16:32:06,016 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. 2024-12-09T16:32:06,038 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:32:06,042 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:32:06,054 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:32:06,055 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:32:06,055 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T16:32:06,056 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27c6c264{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:32:06,056 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60f9e36b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:32:06,182 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d986ea5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/java.io.tmpdir/jetty-localhost-42261-hadoop-hdfs-3_4_1-tests_jar-_-any-8082322773897230587/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:32:06,182 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@72671a05{HTTP/1.1, (http/1.1)}{localhost:42261} 2024-12-09T16:32:06,182 INFO [Time-limited test {}] server.Server(415): Started @125348ms 2024-12-09T16:32:06,184 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T16:32:06,222 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:32:06,226 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:32:06,237 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:32:06,238 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:32:06,238 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T16:32:06,239 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d714669{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:32:06,239 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28f66f58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:32:06,374 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ae0e5c8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/java.io.tmpdir/jetty-localhost-36897-hadoop-hdfs-3_4_1-tests_jar-_-any-15522359066789647921/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:32:06,374 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@61b41358{HTTP/1.1, (http/1.1)}{localhost:36897} 2024-12-09T16:32:06,374 INFO [Time-limited test {}] server.Server(415): Started @125540ms 2024-12-09T16:32:06,376 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T16:32:06,449 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:32:06,461 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:32:06,471 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:32:06,471 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:32:06,471 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T16:32:06,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@630c7f7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:32:06,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30b0250{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:32:06,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@70fc09c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/java.io.tmpdir/jetty-localhost-34267-hadoop-hdfs-3_4_1-tests_jar-_-any-17569844587196452692/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:32:06,604 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10ee7da1{HTTP/1.1, (http/1.1)}{localhost:34267}
2024-12-09T16:32:06,604 INFO [Time-limited test {}] server.Server(415): Started @125770ms
2024-12-09T16:32:06,606 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T16:32:07,715 WARN [Thread-859 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data6/current/BP-2100637919-172.17.0.2-1733761910595/current, will proceed with Du for space computation calculation,
2024-12-09T16:32:07,715 WARN [Thread-858 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data5/current/BP-2100637919-172.17.0.2-1733761910595/current, will proceed with Du for space computation calculation,
2024-12-09T16:32:07,736 WARN [Thread-802 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1
2024-12-09T16:32:07,740 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc2c8b7f8f3f86019 with lease ID 0x2522342f5d96fe7: Processing first storage report for DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868 from datanode DatanodeRegistration(127.0.0.1:37681, datanodeUuid=ac2b4399-bde3-43a0-90ef-f4bb94ae9d6c, infoPort=38147, infoSecurePort=0, ipcPort=37019, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595)
2024-12-09T16:32:07,740 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc2c8b7f8f3f86019 with lease ID 0x2522342f5d96fe7: from storage DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868 node DatanodeRegistration(127.0.0.1:37681, datanodeUuid=ac2b4399-bde3-43a0-90ef-f4bb94ae9d6c, infoPort=38147, infoSecurePort=0, ipcPort=37019, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:32:07,740 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc2c8b7f8f3f86019 with lease ID 0x2522342f5d96fe7: Processing first storage report for DS-9f96fa8f-5468-47a8-b3dd-1788aef01ed5 from datanode DatanodeRegistration(127.0.0.1:37681, datanodeUuid=ac2b4399-bde3-43a0-90ef-f4bb94ae9d6c, infoPort=38147, infoSecurePort=0, ipcPort=37019, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595)
2024-12-09T16:32:07,740 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc2c8b7f8f3f86019 with lease ID 0x2522342f5d96fe7: from storage DS-9f96fa8f-5468-47a8-b3dd-1788aef01ed5 node DatanodeRegistration(127.0.0.1:37681, datanodeUuid=ac2b4399-bde3-43a0-90ef-f4bb94ae9d6c, infoPort=38147, infoSecurePort=0, ipcPort=37019, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:32:08,040 WARN [Thread-872 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data7/current/BP-2100637919-172.17.0.2-1733761910595/current, will proceed with Du for space computation calculation,
2024-12-09T16:32:08,040 WARN [Thread-873 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data8/current/BP-2100637919-172.17.0.2-1733761910595/current, will proceed with Du for space computation calculation,
2024-12-09T16:32:08,055 WARN [Thread-824 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1
2024-12-09T16:32:08,057 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x31a90eb01dcbed0a with lease ID 0x2522342f5d96fe8: Processing first storage report for DS-63692479-bf45-400c-9f25-6a83d70cd7aa from datanode DatanodeRegistration(127.0.0.1:38075, datanodeUuid=2de7f43d-d7b7-4813-bb44-91e23ae71b8a, infoPort=43979, infoSecurePort=0, ipcPort=36195, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595)
2024-12-09T16:32:08,057 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x31a90eb01dcbed0a with lease ID 0x2522342f5d96fe8: from storage DS-63692479-bf45-400c-9f25-6a83d70cd7aa node DatanodeRegistration(127.0.0.1:38075, datanodeUuid=2de7f43d-d7b7-4813-bb44-91e23ae71b8a, infoPort=43979, infoSecurePort=0, ipcPort=36195, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:32:08,058 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x31a90eb01dcbed0a with lease ID 0x2522342f5d96fe8: Processing first storage report for DS-19c88035-8337-4a32-91bf-60c3370e9c5d from datanode DatanodeRegistration(127.0.0.1:38075, datanodeUuid=2de7f43d-d7b7-4813-bb44-91e23ae71b8a, infoPort=43979, infoSecurePort=0, ipcPort=36195, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595)
2024-12-09T16:32:08,058 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x31a90eb01dcbed0a with lease ID 0x2522342f5d96fe8: from storage DS-19c88035-8337-4a32-91bf-60c3370e9c5d node DatanodeRegistration(127.0.0.1:38075, datanodeUuid=2de7f43d-d7b7-4813-bb44-91e23ae71b8a, infoPort=43979, infoSecurePort=0, ipcPort=36195, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T16:32:08,161 WARN [Thread-884 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10/current/BP-2100637919-172.17.0.2-1733761910595/current, will proceed with Du for space computation calculation,
2024-12-09T16:32:08,161 WARN [Thread-883 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9/current/BP-2100637919-172.17.0.2-1733761910595/current, will proceed with Du for space computation calculation,
2024-12-09T16:32:08,182 WARN [Thread-847 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1
2024-12-09T16:32:08,184 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x409602ea2fd96cc9 with lease ID 0x2522342f5d96fe9: Processing first storage report for DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2 from datanode DatanodeRegistration(127.0.0.1:42103, datanodeUuid=d661179c-c250-402b-a520-ede73037293f, infoPort=40487, infoSecurePort=0, ipcPort=34039, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595)
2024-12-09T16:32:08,185 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x409602ea2fd96cc9 with lease ID 0x2522342f5d96fe9: from storage DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2 node DatanodeRegistration(127.0.0.1:42103, datanodeUuid=d661179c-c250-402b-a520-ede73037293f, infoPort=40487, infoSecurePort=0, ipcPort=34039, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T16:32:08,185 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x409602ea2fd96cc9 with lease ID 0x2522342f5d96fe9: Processing first storage report for DS-7f2135e2-875f-496d-9e2b-ffb1e50e7fb0 from datanode DatanodeRegistration(127.0.0.1:42103, datanodeUuid=d661179c-c250-402b-a520-ede73037293f, infoPort=40487, infoSecurePort=0, ipcPort=34039, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595)
2024-12-09T16:32:08,185 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x409602ea2fd96cc9 with lease ID 0x2522342f5d96fe9: from storage DS-7f2135e2-875f-496d-9e2b-ffb1e50e7fb0 node DatanodeRegistration(127.0.0.1:42103, datanodeUuid=d661179c-c250-402b-a520-ede73037293f, infoPort=40487, infoSecurePort=0, ipcPort=34039, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:32:08,251 WARN [ResponseProcessor for block BP-2100637919-172.17.0.2-1733761910595:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2100637919-172.17.0.2-1733761910595:blk_1073741837_1013
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,251 WARN [ResponseProcessor for block BP-2100637919-172.17.0.2-1733761910595:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2100637919-172.17.0.2-1733761910595:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,252 WARN [ResponseProcessor for block BP-2100637919-172.17.0.2-1733761910595:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2100637919-172.17.0.2-1733761910595:blk_1073741830_1006
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,252 WARN [ResponseProcessor for block BP-2100637919-172.17.0.2-1733761910595:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2100637919-172.17.0.2-1733761910595:blk_1073741833_1009
java.io.IOException: Bad response ERROR for BP-2100637919-172.17.0.2-1733761910595:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,252 WARN [DataStreamer for file /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 block BP-2100637919-172.17.0.2-1733761910595:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:08,252 WARN [DataStreamer for file /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 block BP-2100637919-172.17.0.2-1733761910595:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:08,252 WARN [DataStreamer for file /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 block BP-2100637919-172.17.0.2-1733761910595:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK], DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:08,252 WARN [DataStreamer for file /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta block BP-2100637919-172.17.0.2-1733761910595:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:08,253 WARN [PacketResponder: BP-2100637919-172.17.0.2-1733761910595:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43671] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run():
java.io.IOException: Broken pipe
    at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?]
    at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?]
    at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?]
    at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?]
    at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?]
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?]
    at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:08,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1095561119_22 at /127.0.0.1:49706 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49706 dst: /127.0.0.1:43671
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:08,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1095561119_22 at /127.0.0.1:33676 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33676 dst: /127.0.0.1:40975
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:08,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:33712 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33712 dst: /127.0.0.1:40975
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:08,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_974676548_22 at /127.0.0.1:41442 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41442 dst: /127.0.0.1:40975
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:08,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:59608 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59608 dst: /127.0.0.1:43671
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:08,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:41398 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41398 dst: /127.0.0.1:40975
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:08,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:49742 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49742 dst: /127.0.0.1:43671
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:08,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_974676548_22 at /127.0.0.1:59628 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:43671:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59628 dst: /127.0.0.1:43671
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:08,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1be4b360{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:32:08,257 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1f7847d8{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:32:08,257 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:32:08,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@766b5ae3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:32:08,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b405f91{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,STOPPED}
2024-12-09T16:32:08,259 WARN [BP-2100637919-172.17.0.2-1733761910595 heartbeating to localhost/127.0.0.1:38821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:32:08,259 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:32:08,259 WARN [BP-2100637919-172.17.0.2-1733761910595 heartbeating to localhost/127.0.0.1:38821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2100637919-172.17.0.2-1733761910595 (Datanode Uuid 07f6efee-23eb-4e00-b421-21c4606f42e4) service to localhost/127.0.0.1:38821
2024-12-09T16:32:08,259 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:32:08,259 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data3/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:32:08,260 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data4/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:32:08,260 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:32:08,261 WARN [DataStreamer for file /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta block BP-2100637919-172.17.0.2-1733761910595:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010
java.net.SocketException: Connection reset
    at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?]
    at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?]
    at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?]
    at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,264 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@67155b28 {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing unknown operation src: /127.0.0.1:49482 dst: /127.0.0.1:40975
java.io.IOException: Server closed.
    at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:08,265 WARN [DataStreamer for file /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 block BP-2100637919-172.17.0.2-1733761910595:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009
java.net.SocketException: Connection reset
    at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?]
    at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?]
    at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?]
    at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,266 WARN [ResponseProcessor for block BP-2100637919-172.17.0.2-1733761910595:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2100637919-172.17.0.2-1733761910595:blk_1073741830_1014
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,268 WARN [DataStreamer for file /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 block BP-2100637919-172.17.0.2-1733761910595:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,268 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1095561119_22 at /127.0.0.1:49480 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49480 dst: /127.0.0.1:40975
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:08,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1072886b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:32:08,271 WARN [BP-2100637919-172.17.0.2-1733761910595 heartbeating to localhost/127.0.0.1:38821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2100637919-172.17.0.2-1733761910595 (Datanode Uuid 690754e7-26ac-4861-acf0-3f82009d1f3e) service to localhost/127.0.0.1:38821
2024-12-09T16:32:08,271 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data1/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:32:08,271 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data2/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:32:08,272 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2804b66{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:32:08,272 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:32:08,272 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@646dceab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:32:08,272 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,272 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7883fae9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,STOPPED}
2024-12-09T16:32:08,272 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,274 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:32:08,279 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b., hostname=dfce6ce5b10c,41215,1733761912669, seqNum=2]
2024-12-09T16:32:08,280 ERROR [FSHLog-0-hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52-prefix:dfce6ce5b10c,41215,1733761912669 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,280 WARN [FSHLog-0-hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52-prefix:dfce6ce5b10c,41215,1733761912669 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,281 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C41215%2C1733761912669:(num 1733761914435) roll requested
2024-12-09T16:32:08,281 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C41215%2C1733761912669.1733761928281
2024-12-09T16:32:08,284 WARN [Thread-898 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,285 WARN [Thread-898 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:08,285 WARN [Thread-898 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741838_1018
2024-12-09T16:32:08,288 WARN [Thread-898 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]
2024-12-09T16:32:08,296 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:08,296 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:08,296 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:08,296 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:08,296 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:08,297 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761928281
2024-12-09T16:32:08,301 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,301 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:08,302 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-09T16:32:08,303 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-09T16:32:08,303 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435
2024-12-09T16:32:08,305 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38147:38147),(127.0.0.1/127.0.0.1:43979:43979)]
2024-12-09T16:32:08,306 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 is not closed yet, will try archiving it next time
2024-12-09T16:32:08,306 WARN [IPC Server handler 3 on default port 38821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009
2024-12-09T16:32:08,311 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 after 6ms
2024-12-09T16:32:09,450 INFO [master:store-WAL-Roller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:09,881 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:10,306 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:10,310 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761928281 2024-12-09T16:32:10,311 WARN [ResponseProcessor for block BP-2100637919-172.17.0.2-1733761910595:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2100637919-172.17.0.2-1733761910595:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
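The RecoverLeaseFSUtils activity above (attempt=0 failing after 6ms, with the NameNode answering "Lease recovery is in progress. RecoveryId = 1020") follows the standard HDFS lease-recovery loop. A minimal sketch of that loop, assuming an HDFS path; recoverLease() and isFileClosed() are real DistributedFileSystem methods, while the retry count and sleep here are illustrative, not HBase's exact backoff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease on an abandoned WAL file and
      // poll until the file is closed or we give up.
      static boolean recoverWalLease(Configuration conf, Path wal) throws Exception {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // nothing to recover on a non-HDFS filesystem
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        for (int attempt = 0; attempt < 10; attempt++) {
          // true once the file is closed and its last block is finalized;
          // false while block recovery (the RecoveryId above) is still running.
          if (dfs.recoverLease(wal)) {
            return true;
          }
          Thread.sleep(4000L); // the log shows ~4s between attempt=0 and attempt=1
        }
        return dfs.isFileClosed(wal);
      }
    }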
2024-12-09T16:32:10,311 WARN [DataStreamer for file /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761928281 block BP-2100637919-172.17.0.2-1733761910595:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad. 2024-12-09T16:32:10,312 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:49044 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:37681:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49044 dst: /127.0.0.1:37681 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
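The recurring "All datanodes [...] are bad. Aborting..." entries are the DFS client giving up on pipeline recovery once no replacement datanode can be found. Whether a replacement is even attempted is governed by the client-side replace-datanode-on-failure settings; a hedged sketch follows (the keys are standard HDFS client configuration, the values shown are illustrative choices for a small test cluster):

    import org.apache.hadoop.conf.Configuration;

    public class PipelinePolicySketch {
      static Configuration clientConf() {
        Configuration conf = new Configuration();
        // Allow swapping a failed datanode for a new one mid-write
        // instead of aborting the stream outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // ALWAYS / DEFAULT / NEVER. DEFAULT only replaces under certain
        // conditions (roughly, replication of 3 or more), so a replication-2
        // WAL like the one above can still end with "All datanodes ... are bad".
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // If no replacement can be found, keep writing to the survivors
        // rather than failing the write.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }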
2024-12-09T16:32:10,312 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:47800 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:38075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47800 dst: /127.0.0.1:38075 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T16:32:10,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d986ea5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:32:10,327 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@72671a05{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T16:32:10,327 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T16:32:10,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60f9e36b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T16:32:10,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27c6c264{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,STOPPED} 2024-12-09T16:32:10,328 WARN [BP-2100637919-172.17.0.2-1733761910595 heartbeating to localhost/127.0.0.1:38821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T16:32:10,328 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
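The Jetty shutdowns and "Ending block pool service" entries above are a datanode being stopped on purpose: testLogRollOnDatanodeDeath kills datanodes out from under the WAL to force a roll onto a fresh pipeline. A minimal sketch of that pattern, assuming the MiniDFSCluster harness from the hadoop-hdfs tests jar used by this run (Builder, waitActive, stopDataNode and shutdown are its real methods; the node count is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeDeathSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3) // small pipeline, as in the test above
            .build();
        try {
          cluster.waitActive();
          // Stopping a datanode closes its DataXceiver server; any writer
          // still holding it in a pipeline then sees the "Connection refused"
          // createBlockOutputStream errors that fill this log.
          cluster.stopDataNode(0);
          // ... write through the DFS client here and observe pipeline recovery
        } finally {
          cluster.shutdown();
        }
      }
    }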
2024-12-09T16:32:10,328 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T16:32:10,328 WARN [BP-2100637919-172.17.0.2-1733761910595 heartbeating to localhost/127.0.0.1:38821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2100637919-172.17.0.2-1733761910595 (Datanode Uuid ac2b4399-bde3-43a0-90ef-f4bb94ae9d6c) service to localhost/127.0.0.1:38821 2024-12-09T16:32:10,329 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data5/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:32:10,329 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data6/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:32:10,329 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T16:32:11,451 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:11,882 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:12,306 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:12,307 WARN [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]] 2024-12-09T16:32:12,307 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C41215%2C1733761912669:(num 1733761928281) roll requested 2024-12-09T16:32:12,308 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C41215%2C1733761912669.1733761932308 2024-12-09T16:32:12,312 WARN [Thread-907 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:12,312 WARN [Thread-907 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad. 2024-12-09T16:32:12,312 WARN [Thread-907 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741840_1022 2024-12-09T16:32:12,312 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 after 4009ms 2024-12-09T16:32:12,313 WARN [Thread-907 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK] 2024-12-09T16:32:12,315 WARN [Thread-907 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:12,315 WARN [Thread-907 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad. 2024-12-09T16:32:12,315 WARN [Thread-907 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741841_1023 2024-12-09T16:32:12,316 WARN [Thread-907 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK] 2024-12-09T16:32:12,320 WARN [Thread-907 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37681 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:12,320 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:47824 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data8]'}, localName='127.0.0.1:38075', datanodeUuid='2de7f43d-d7b7-4813-bb44-91e23ae71b8a', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741842_1024 to mirror 127.0.0.1:37681 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T16:32:12,320 WARN [Thread-907 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad. 2024-12-09T16:32:12,320 WARN [Thread-907 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741842_1024 2024-12-09T16:32:12,320 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:47824 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T16:32:12,320 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:47824 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:38075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47824 dst: /127.0.0.1:38075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
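The "HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL" warning above is FSHLog noticing, after a sync, that its output stream's live pipeline has shrunk below the expected replication, and asking the roller for a new file. A hypothetical sketch of the shape of that check; the class and helper names below are illustrative stand-ins, not HBase's actual code:

    // Sketch only: FSHLog reads the live pipeline off its DFS output stream
    // and caps how many consecutive rolls low replication may force.
    class LowReplicationRollSketch {
      private final int expectedReplicas; // pipeline size the WAL was created with
      private final int rollLimit;        // cap on consecutive low-replication rolls
      private int consecutiveRolls = 0;

      LowReplicationRollSketch(int expectedReplicas, int rollLimit) {
        this.expectedReplicas = expectedReplicas;
        this.rollLimit = rollLimit;
      }

      /** Called after a sync; true means "Requesting close of WAL". */
      boolean checkLowReplication(int currentPipelineSize) {
        if (currentPipelineSize < expectedReplicas && consecutiveRolls < rollLimit) {
          consecutiveRolls++;
          return true;
        }
        if (currentPipelineSize >= expectedReplicas) {
          consecutiveRolls = 0; // pipeline healthy again
        }
        return false;
      }
    }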
2024-12-09T16:32:12,321 WARN [Thread-907 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK] 2024-12-09T16:32:12,327 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:32:12,327 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:32:12,327 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:32:12,328 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:32:12,328 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:32:12,328 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761928281 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761932308 2024-12-09T16:32:12,330 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40487:40487),(127.0.0.1/127.0.0.1:43979:43979)] 2024-12-09T16:32:12,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38075 is added to blk_1073741839_1021 (size=2431) 2024-12-09T16:32:12,331 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 is not closed yet, will try archiving it next time 2024-12-09T16:32:12,331 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761928281 is not closed yet, will try archiving it next time 2024-12-09T16:32:12,334 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:32:12,732 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 is not closed yet, will try archiving it next time 2024-12-09T16:32:13,451 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:13,882 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:14,331 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:14,338 WARN [ResponseProcessor for block BP-2100637919-172.17.0.2-1733761910595:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2100637919-172.17.0.2-1733761910595:blk_1073741843_1025 java.io.IOException: Bad response ERROR for BP-2100637919-172.17.0.2-1733761910595:blk_1073741843_1025 from datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:14,338 WARN [DataStreamer for file /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761932308 block BP-2100637919-172.17.0.2-1733761910595:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad. 2024-12-09T16:32:14,339 WARN [PacketResponder: BP-2100637919-172.17.0.2-1733761910595:blk_1073741843_1025, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38075] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T16:32:14,339 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:52582 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52582 dst: /127.0.0.1:42103 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
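The "Waiting up to [10,000] milli-secs(wait.for.ratio=[1])" line at 16:32:12,334 above is HBase's test-side Waiter utility polling a condition. A hedged usage sketch; Waiter.waitFor and Waiter.Predicate are real hbase test helpers, while the WAL-name condition here is only an example of what such a test might wait on:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Waiter;

    public class WaiterSketch {
      static void waitForWalRoll(Configuration conf,
          java.util.function.Supplier<String> currentWalName, String oldWalName)
          throws Exception {
        // Polls until the predicate holds or ~10s (scaled by wait.for.ratio)
        // elapses, emitting the "Waiting up to [10,000] milli-secs" line above.
        Waiter.waitFor(conf, 10000, new Waiter.Predicate<Exception>() {
          @Override
          public boolean evaluate() {
            return !currentWalName.get().equals(oldWalName);
          }
        });
      }
    }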
2024-12-09T16:32:14,339 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:47838 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:38075:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47838 dst: /127.0.0.1:38075 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T16:32:14,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ae0e5c8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:32:14,352 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@61b41358{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T16:32:14,352 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T16:32:14,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28f66f58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T16:32:14,353 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d714669{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,STOPPED} 2024-12-09T16:32:14,354 WARN [BP-2100637919-172.17.0.2-1733761910595 heartbeating to localhost/127.0.0.1:38821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T16:32:14,354 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T16:32:14,354 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T16:32:14,354 WARN [BP-2100637919-172.17.0.2-1733761910595 heartbeating to localhost/127.0.0.1:38821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2100637919-172.17.0.2-1733761910595 (Datanode Uuid 2de7f43d-d7b7-4813-bb44-91e23ae71b8a) service to localhost/127.0.0.1:38821 2024-12-09T16:32:14,355 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data7/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:32:14,355 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data8/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:32:14,355 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T16:32:14,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41215 {}] regionserver.HRegion(8855): Flush requested on 1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:32:14,368 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1240e0f513578afb8fd45101b7305c2b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T16:32:14,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/8e2537b74a4042cab87e48527662577a is 1080, key is row0002/info:/1733761930330/Put/seqid=0 2024-12-09T16:32:14,389 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:14,389 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad. 2024-12-09T16:32:14,389 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741844_1027 2024-12-09T16:32:14,390 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK] 2024-12-09T16:32:14,392 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:14,393 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad. 
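The MemStoreFlusher sequence running through this part of the log (Flushing ... dataSize=7.36 KB, then a .tmp HFile committed under info/) is a normal memstore flush fighting the failing pipeline. The same flush path can be driven from a client; a minimal sketch using the standard HBase Admin API, with the table name taken from this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush every region of the table: each region writes its memstore
          // to a .tmp HFile and commits it under the store directory, the
          // HRegionFileSystem "Committing ... as ..." step seen below.
          admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
        }
      }
    }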
2024-12-09T16:32:14,393 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741845_1028 2024-12-09T16:32:14,394 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK] 2024-12-09T16:32:14,398 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38075 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:14,398 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:52602 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741846_1029] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741846_1029 to mirror 127.0.0.1:38075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T16:32:14,398 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad. 
2024-12-09T16:32:14,398 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741846_1029 2024-12-09T16:32:14,398 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:52602 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741846_1029] {}] datanode.BlockReceiver(316): Block 1073741846 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T16:32:14,399 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:52602 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741846_1029] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52602 dst: /127.0.0.1:42103 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T16:32:14,399 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK] 2024-12-09T16:32:14,402 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37681 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
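With several of the test's datanodes now stopped, the NameNode soon reports "Failed to place enough replicas" (below): fewer usable nodes remain than the requested replication of 2. A hedged diagnostic sketch for checking what the client can still see; getDataNodeStats is a real DistributedFileSystem method, and the NameNode address is the one from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    public class LiveDatanodesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:38821"); // NameNode from this log
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // One entry per datanode the NameNode still counts as live; note
          // that freshly killed nodes stay "live" until heartbeats expire.
          for (DatanodeInfo dn :
              dfs.getDataNodeStats(HdfsConstants.DatanodeReportType.LIVE)) {
            System.out.println(dn.getXferAddr() + " " + dn.getDatanodeUuid());
          }
        }
      }
    }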
2024-12-09T16:32:14,402 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:52618 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741847_1030 to mirror 127.0.0.1:37681 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T16:32:14,403 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad. 2024-12-09T16:32:14,403 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741847_1030 2024-12-09T16:32:14,403 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:52618 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T16:32:14,403 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:52618 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52618 dst: /127.0.0.1:42103 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T16:32:14,404 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK] 2024-12-09T16:32:14,405 WARN [IPC Server handler 4 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T16:32:14,405 WARN [IPC Server handler 4 on default port 38821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T16:32:14,405 WARN [IPC Server handler 4 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T16:32:14,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741848_1031 (size=10347) 2024-12-09T16:32:14,813 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/8e2537b74a4042cab87e48527662577a 2024-12-09T16:32:14,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/8e2537b74a4042cab87e48527662577a as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/8e2537b74a4042cab87e48527662577a 2024-12-09T16:32:14,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/8e2537b74a4042cab87e48527662577a, entries=5, sequenceid=11, filesize=10.1 K 2024-12-09T16:32:14,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 1240e0f513578afb8fd45101b7305c2b in 463ms, sequenceid=11, compaction requested=false 2024-12-09T16:32:14,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1240e0f513578afb8fd45101b7305c2b: 2024-12-09T16:32:14,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41215 {}] regionserver.HRegion(8855): Flush requested on 1240e0f513578afb8fd45101b7305c2b 2024-12-09T16:32:14,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1240e0f513578afb8fd45101b7305c2b 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-09T16:32:15,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/ba478df70622483f9b6ce6eec60aaa5d is 1080, key is row0007/info:/1733761934370/Put/seqid=0 2024-12-09T16:32:15,007 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:15,008 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad. 2024-12-09T16:32:15,008 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741849_1032 2024-12-09T16:32:15,012 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK] 2024-12-09T16:32:15,014 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:15,014 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:15,014 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741850_1033
2024-12-09T16:32:15,015 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]
2024-12-09T16:32:15,019 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40975
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:15,018 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43162 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741851_1034] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741851_1034 to mirror 127.0.0.1:40975
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:15,019 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
2024-12-09T16:32:15,019 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741851_1034
2024-12-09T16:32:15,019 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43162 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741851_1034] {}] datanode.BlockReceiver(316): Block 1073741851 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T16:32:15,019 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43162 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741851_1034] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43162 dst: /127.0.0.1:42103
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:15,020 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:15,022 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:15,022 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad.
2024-12-09T16:32:15,022 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741852_1035
2024-12-09T16:32:15,023 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]
2024-12-09T16:32:15,024 WARN [IPC Server handler 4 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T16:32:15,024 WARN [IPC Server handler 4 on default port 38821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T16:32:15,024 WARN [IPC Server handler 4 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T16:32:15,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741853_1036 (size=12506)
2024-12-09T16:32:15,431 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/ba478df70622483f9b6ce6eec60aaa5d
2024-12-09T16:32:15,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing
hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/ba478df70622483f9b6ce6eec60aaa5d as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ba478df70622483f9b6ce6eec60aaa5d
2024-12-09T16:32:15,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ba478df70622483f9b6ce6eec60aaa5d, entries=7, sequenceid=24, filesize=12.2 K
2024-12-09T16:32:15,452 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:15,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 1240e0f513578afb8fd45101b7305c2b in 454ms, sequenceid=24, compaction requested=false
2024-12-09T16:32:15,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1240e0f513578afb8fd45101b7305c2b:
2024-12-09T16:32:15,453 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K
2024-12-09T16:32:15,453 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:32:15,453 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ba478df70622483f9b6ce6eec60aaa5d because midkey is the same as first or last row
2024-12-09T16:32:15,882 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
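The two wal.FSHLog(580) records above show the WAL's DataStreamer giving up outright: with every candidate datanode excluded, pipeline recovery has nothing left to rebuild with and surfaces "All datanodes [...] are bad. Aborting...". Whether the client may swap a replacement datanode into a failing pipeline is controlled by a small set of standard HDFS client settings; the sketch below is illustrative only (the class name is hypothetical, and the stated defaults should be checked against the Hadoop 3.x documentation).

```java
// Minimal sketch of the HDFS client knobs behind write-pipeline recovery.
import org.apache.hadoop.conf.Configuration;

public class PipelineRecoverySettings {
  public static Configuration sketch() {
    Configuration conf = new Configuration();
    // Permit replacing a failed datanode during pipeline recovery (default: true).
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT generally replaces nodes only for pipelines of 3+ replicas; with
    // replication=2, as in this test, a failure leaves little to replace with.
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // If true, keep writing with the surviving nodes when no replacement can be
    // found instead of failing the stream.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", false);
    return conf;
  }
}
```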
2024-12-09T16:32:16,202 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@61cde5f3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42103, datanodeUuid=d661179c-c250-402b-a520-ede73037293f, infoPort=40487, infoSecurePort=0, ipcPort=34039, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595):Failed to transfer BP-2100637919-172.17.0.2-1733761910595:blk_1073741848_1031 to 127.0.0.1:38075 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:16,202 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@77cf8950[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42103, datanodeUuid=d661179c-c250-402b-a520-ede73037293f, infoPort=40487, infoSecurePort=0, ipcPort=34039, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595):Failed to transfer BP-2100637919-172.17.0.2-1733761910595:blk_1073741853_1036 to 127.0.0.1:37681 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:16,331 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
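The datanode.DataNode$DataTransfer warnings are background re-replication attempts: the NameNode has asked the surviving datanode (127.0.0.1:42103) to copy under-replicated blocks to peers that are themselves already down. A quick way to confirm which transfer targets are actually alive is a datanode report; a minimal sketch against this test's NameNode (hdfs://localhost:38821, the class name hypothetical) follows.

```java
// Sketch: list live and dead datanodes as the NameNode sees them.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class DatanodeLiveness {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38821"), new Configuration());
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    for (DatanodeInfo dn : dfs.getDataNodeStats(HdfsConstants.DatanodeReportType.LIVE)) {
      System.out.println("LIVE " + dn.getXferAddr());  // e.g. 127.0.0.1:42103
    }
    for (DatanodeInfo dn : dfs.getDataNodeStats(HdfsConstants.DatanodeReportType.DEAD)) {
      System.out.println("DEAD " + dn.getXferAddr());  // the killed test datanodes
    }
  }
}
```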
2024-12-09T16:32:16,332 WARN [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]
2024-12-09T16:32:16,332 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C41215%2C1733761912669:(num 1733761932308) roll requested
2024-12-09T16:32:16,333 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C41215%2C1733761912669.1733761936332
2024-12-09T16:32:16,338 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40975
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43170 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741854_1037 to mirror 127.0.0.1:40975
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
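wal.FSHLog(529) is the roll trigger itself: the log roller compares the live pipeline's replica count against the minimum it tolerates and, finding 1 < 2, requests a roll onto a fresh block and, ideally, a healthier pipeline. A hedged sketch of the region server settings involved is below; the key names match FSHLog's low-replication handling in recent HBase releases but should be verified against the version in use, and the class name is hypothetical.

```java
// Sketch only; verify these keys against your HBase version's FSHLog.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalLowReplicationSettings {
  public static Configuration sketch() {
    Configuration conf = HBaseConfiguration.create();
    // Replica count below which FSHLog asks for a roll; by default it tracks
    // the file system's default replication (2 in this mini-cluster).
    conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
    // How many consecutive low-replication rolls are attempted before the
    // server stops re-rolling and keeps the degraded pipeline.
    conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
    return conf;
  }
}
```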
2024-12-09T16:32:16,338 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
2024-12-09T16:32:16,338 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741854_1037
2024-12-09T16:32:16,338 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43170 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-09T16:32:16,338 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43170 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43170 dst: /127.0.0.1:42103
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:16,339 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:16,341 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,341 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK], DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad.
2024-12-09T16:32:16,341 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741855_1038
2024-12-09T16:32:16,342 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]
2024-12-09T16:32:16,344 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,345 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:16,345 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741856_1039
2024-12-09T16:32:16,346 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:16,347 WARN [Thread-933 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,348 WARN [Thread-933 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:16,348 WARN [Thread-933 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741857_1040
2024-12-09T16:32:16,348 WARN [Thread-933 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]
2024-12-09T16:32:16,349 WARN [IPC Server handler 2 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T16:32:16,349 WARN [IPC Server handler 2 on default port 38821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T16:32:16,349 WARN [IPC Server handler 2 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T16:32:16,354 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:16,354 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:16,354 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:16,354 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:16,354 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:16,355 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761932308 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761936332
2024-12-09T16:32:16,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741843_1026 (size=25992)
2024-12-09T16:32:16,357 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40487:40487)]
2024-12-09T16:32:16,357 DEBUG
[regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 is not closed yet, will try archiving it next time
2024-12-09T16:32:16,357 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761932308 is not closed yet, will try archiving it next time
2024-12-09T16:32:16,358 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761928281 to hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/oldWALs/dfce6ce5b10c%2C41215%2C1733761912669.1733761928281
2024-12-09T16:32:16,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41215 {}] regionserver.HRegion(8855): Flush requested on 1240e0f513578afb8fd45101b7305c2b
2024-12-09T16:32:16,424 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1240e0f513578afb8fd45101b7305c2b 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-12-09T16:32:16,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/d6225cd302a1498799afb900fb8f985f is 1079, key is tmprow/info:/1733761936422/Put/seqid=0
2024-12-09T16:32:16,434 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,435 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad.
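The "Flush requested"/"Flushing ..." pair above is MemStoreFlusher reacting to memstore pressure on region 1240e0f513578afb8fd45101b7305c2b. The same flush can be forced from a client through the public Admin API; a minimal sketch (table name taken from this test, cluster settings assumed to be on the classpath, class name hypothetical) is:

```java
// Sketch: request a flush of all column families of a table.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Triggers the same HRegion flush path logged above.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"));
    }
  }
}
```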
2024-12-09T16:32:16,435 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741859_1042
2024-12-09T16:32:16,436 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]
2024-12-09T16:32:16,438 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,438 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
2024-12-09T16:32:16,438 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741860_1043
2024-12-09T16:32:16,439 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:16,441 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,442 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:16,442 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741861_1044
2024-12-09T16:32:16,442 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]
2024-12-09T16:32:16,446 WARN [Thread-938 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38075
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,446 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43182 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741862_1045 to mirror 127.0.0.1:38075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:16,446 WARN [Thread-938 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:16,446 WARN [Thread-938 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741862_1045
2024-12-09T16:32:16,446 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43182 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T16:32:16,447 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43182 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43182 dst: /127.0.0.1:42103
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:16,447 WARN [Thread-938 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:16,448 WARN [IPC Server handler 4 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T16:32:16,448 WARN [IPC Server handler 4 on default port 38821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T16:32:16,448 WARN [IPC Server handler 4 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T16:32:16,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741863_1046 (size=6027)
2024-12-09T16:32:16,758 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 is not closed yet, will try archiving it next time
2024-12-09T16:32:16,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/d6225cd302a1498799afb900fb8f985f
2024-12-09T16:32:16,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/d6225cd302a1498799afb900fb8f985f as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/d6225cd302a1498799afb900fb8f985f
2024-12-09T16:32:16,874 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/d6225cd302a1498799afb900fb8f985f, entries=1, sequenceid=34, filesize=5.9 K
2024-12-09T16:32:16,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 1240e0f513578afb8fd45101b7305c2b in 452ms,
sequenceid=34, compaction requested=true
2024-12-09T16:32:16,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1240e0f513578afb8fd45101b7305c2b:
2024-12-09T16:32:16,876 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K
2024-12-09T16:32:16,876 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:32:16,877 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ba478df70622483f9b6ce6eec60aaa5d because midkey is the same as first or last row
2024-12-09T16:32:16,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1240e0f513578afb8fd45101b7305c2b:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T16:32:16,877 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:32:16,877 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T16:32:16,879 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T16:32:16,879 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HStore(1541): 1240e0f513578afb8fd45101b7305c2b/info is initiating minor compaction (all files)
2024-12-09T16:32:16,880 INFO [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1240e0f513578afb8fd45101b7305c2b/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.
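With three store files on disk (10.1 K + 12.2 K + 5.9 K), the post-flush check crosses the compaction threshold and CompactSplit queues the minor compaction that ExploringCompactionPolicy then narrows to all three files. The equivalent compaction can be requested and watched from a client; a short sketch follows (class and helper names hypothetical, APIs from the public HBase Admin interface).

```java
// Sketch: request a minor compaction and poll until the server reports it done.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;

public class CompactAndWait {
  static void compactAndWait(Admin admin, TableName table) throws Exception {
    admin.compact(table); // minor compaction request, like CompactSplit's here
    while (admin.getCompactionState(table) != CompactionState.NONE) {
      Thread.sleep(100);  // poll; real code should bound the wait
    }
  }
}
```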
2024-12-09T16:32:16,880 INFO [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/8e2537b74a4042cab87e48527662577a, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ba478df70622483f9b6ce6eec60aaa5d, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/d6225cd302a1498799afb900fb8f985f] into tmpdir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp, totalSize=28.2 K
2024-12-09T16:32:16,881 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e2537b74a4042cab87e48527662577a, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733761930330
2024-12-09T16:32:16,882 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] compactions.Compactor(225): Compacting ba478df70622483f9b6ce6eec60aaa5d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733761934370
2024-12-09T16:32:16,882 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] compactions.Compactor(225): Compacting d6225cd302a1498799afb900fb8f985f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733761936422
2024-12-09T16:32:16,903 INFO [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1240e0f513578afb8fd45101b7305c2b#info#compaction#21 average throughput is 4.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T16:32:16,904 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/b4c50f5f724549ee816f5caa39a23d64 is 1080, key is row0002/info:/1733761930330/Put/seqid=0
2024-12-09T16:32:16,906 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,906 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad.
2024-12-09T16:32:16,906 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741864_1047
2024-12-09T16:32:16,907 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]
2024-12-09T16:32:16,909 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,909 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
2024-12-09T16:32:16,909 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741865_1048
2024-12-09T16:32:16,910 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:16,912 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,912 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:16,913 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741866_1049
2024-12-09T16:32:16,913 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]
2024-12-09T16:32:16,916 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38075
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:16,916 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43220 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741867_1050 to mirror 127.0.0.1:38075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:16,917 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:16,917 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741867_1050
2024-12-09T16:32:16,917 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43220 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T16:32:16,917 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43220 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43220 dst: /127.0.0.1:42103
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:16,918 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:16,919 WARN [IPC Server handler 1 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T16:32:16,919 WARN [IPC Server handler 1 on default port 38821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T16:32:16,919 WARN [IPC Server handler 1 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T16:32:16,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741868_1051 (size=17994)
2024-12-09T16:32:17,333 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/b4c50f5f724549ee816f5caa39a23d64 as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/b4c50f5f724549ee816f5caa39a23d64
2024-12-09T16:32:17,346 INFO [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1240e0f513578afb8fd45101b7305c2b/info of 1240e0f513578afb8fd45101b7305c2b into b4c50f5f724549ee816f5caa39a23d64(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T16:32:17,346 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1240e0f513578afb8fd45101b7305c2b:
2024-12-09T16:32:17,346 INFO [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b., storeName=1240e0f513578afb8fd45101b7305c2b/info, priority=13, startTime=1733761936877; duration=0sec
2024-12-09T16:32:17,346 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-12-09T16:32:17,346 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:32:17,346 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/b4c50f5f724549ee816f5caa39a23d64 because midkey is the same as first or last row
2024-12-09T16:32:17,346 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-12-09T16:32:17,346 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:32:17,346 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/b4c50f5f724549ee816f5caa39a23d64 because midkey is the same as first or last row
2024-12-09T16:32:17,346 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K
2024-12-09T16:32:17,347 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:32:17,347 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/b4c50f5f724549ee816f5caa39a23d64 because midkey is the same as first or last row
2024-12-09T16:32:17,347 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:32:17,347 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1240e0f513578afb8fd45101b7305c2b:info
2024-12-09T16:32:17,452 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:17,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41215 {}] regionserver.HRegion(8855): Flush requested on 1240e0f513578afb8fd45101b7305c2b
2024-12-09T16:32:17,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1240e0f513578afb8fd45101b7305c2b 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-12-09T16:32:17,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/48ab7d03bdab4a759c2e0aeb27f0bcad is 1079, key is tmprow/info:/1733761937847/Put/seqid=0
2024-12-09T16:32:17,855 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:17,855 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK], DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad.
2024-12-09T16:32:17,855 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741869_1052
2024-12-09T16:32:17,856 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]
2024-12-09T16:32:17,857 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:17,857 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:17,858 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741870_1053
2024-12-09T16:32:17,858 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]
2024-12-09T16:32:17,860 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:17,860 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
2024-12-09T16:32:17,860 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741871_1054
2024-12-09T16:32:17,861 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:17,862 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:17,862 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:17,863 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741872_1055
2024-12-09T16:32:17,863 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:17,864 WARN [IPC Server handler 0 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T16:32:17,864 WARN [IPC Server handler 0 on default port 38821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T16:32:17,864 WARN [IPC Server handler 0 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T16:32:17,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741873_1056 (size=6027)
2024-12-09T16:32:17,883 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:18,269 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/48ab7d03bdab4a759c2e0aeb27f0bcad
2024-12-09T16:32:18,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/48ab7d03bdab4a759c2e0aeb27f0bcad as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/48ab7d03bdab4a759c2e0aeb27f0bcad
2024-12-09T16:32:18,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/48ab7d03bdab4a759c2e0aeb27f0bcad, entries=1, sequenceid=45, filesize=5.9 K
2024-12-09T16:32:18,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 1240e0f513578afb8fd45101b7305c2b in 457ms, sequenceid=45, compaction requested=false
2024-12-09T16:32:18,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1240e0f513578afb8fd45101b7305c2b:
2024-12-09T16:32:18,305 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K
2024-12-09T16:32:18,306 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:32:18,306 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/b4c50f5f724549ee816f5caa39a23d64 because midkey is the same as first or last row
2024-12-09T16:32:18,360 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:18,360 WARN [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]
2024-12-09T16:32:18,360 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C41215%2C1733761912669:(num 1733761936332) roll requested
2024-12-09T16:32:18,361 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C41215%2C1733761912669.1733761938361
2024-12-09T16:32:18,366 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:18,367 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK], DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad.
2024-12-09T16:32:18,367 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741874_1057
2024-12-09T16:32:18,368 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]
2024-12-09T16:32:18,372 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38075
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:18,372 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:18,372 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741875_1058
2024-12-09T16:32:18,373 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43236 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741875_1058 to mirror 127.0.0.1:38075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:18,373 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43236 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-09T16:32:18,374 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:18,373 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43236 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43236 dst: /127.0.0.1:42103
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:18,376 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:18,377 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK], DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
2024-12-09T16:32:18,377 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741876_1059
2024-12-09T16:32:18,378 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:18,381 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:18,381 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:18,381 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741877_1060
2024-12-09T16:32:18,383 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]
2024-12-09T16:32:18,384 WARN [IPC Server handler 0 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T16:32:18,384 WARN [IPC Server handler 0 on default port 38821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T16:32:18,385 WARN [IPC Server handler 0 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T16:32:18,390 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:18,390 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:18,390 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:18,390 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:18,390 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:18,391 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761936332 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761938361
2024-12-09T16:32:18,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741858_1041 (size=13591)
2024-12-09T16:32:18,399 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40487:40487)]
2024-12-09T16:32:18,399 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 is not closed yet, will try archiving it next time
2024-12-09T16:32:18,399 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761936332 is not closed yet, will try archiving it next time
2024-12-09T16:32:18,400 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761932308 to hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/oldWALs/dfce6ce5b10c%2C41215%2C1733761912669.1733761932308
2024-12-09T16:32:18,795 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 is not closed yet, will try archiving it next time
2024-12-09T16:32:19,190 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@77cf8950[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42103, datanodeUuid=d661179c-c250-402b-a520-ede73037293f, infoPort=40487, infoSecurePort=0, ipcPort=34039, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595):Failed to transfer BP-2100637919-172.17.0.2-1733761910595:blk_1073741843_1026 to 127.0.0.1:37681 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:19,190 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@61cde5f3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42103, datanodeUuid=d661179c-c250-402b-a520-ede73037293f, infoPort=40487, infoSecurePort=0, ipcPort=34039, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595):Failed to transfer BP-2100637919-172.17.0.2-1733761910595:blk_1073741863_1046 to 127.0.0.1:37681 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:19,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41215 {}] regionserver.HRegion(8855): Flush requested on 1240e0f513578afb8fd45101b7305c2b
2024-12-09T16:32:19,301 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1240e0f513578afb8fd45101b7305c2b 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-12-09T16:32:19,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/ceff3f6a252e43349e4ac5a3fe4e76e9 is 1079, key is tmprow/info:/1733761939293/Put/seqid=0
2024-12-09T16:32:19,312 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:19,312 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:19,312 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741879_1062
2024-12-09T16:32:19,314 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]
2024-12-09T16:32:19,328 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40975
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:19,328 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43254 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741880_1063 to mirror 127.0.0.1:40975
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:19,328 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43254 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T16:32:19,328 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
2024-12-09T16:32:19,329 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741880_1063
2024-12-09T16:32:19,329 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43254 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43254 dst: /127.0.0.1:42103
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:19,332 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:19,340 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38075
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:19,340 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43270 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741881_1064 to mirror 127.0.0.1:38075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:19,341 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:19,341 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43270 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T16:32:19,341 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741881_1064
2024-12-09T16:32:19,341 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43270 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43270 dst: /127.0.0.1:42103
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:19,344 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:19,348 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:19,348 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad. 2024-12-09T16:32:19,348 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741882_1065 2024-12-09T16:32:19,351 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK] 2024-12-09T16:32:19,352 WARN [IPC Server handler 0 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T16:32:19,352 WARN [IPC Server handler 0 on default port 38821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T16:32:19,352 WARN [IPC Server handler 0 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T16:32:19,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741883_1066 (size=6027) 2024-12-09T16:32:19,361 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/ceff3f6a252e43349e4ac5a3fe4e76e9 2024-12-09T16:32:19,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/ceff3f6a252e43349e4ac5a3fe4e76e9 as 
hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ceff3f6a252e43349e4ac5a3fe4e76e9 2024-12-09T16:32:19,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ceff3f6a252e43349e4ac5a3fe4e76e9, entries=1, sequenceid=55, filesize=5.9 K 2024-12-09T16:32:19,381 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 1240e0f513578afb8fd45101b7305c2b in 80ms, sequenceid=55, compaction requested=true 2024-12-09T16:32:19,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1240e0f513578afb8fd45101b7305c2b: 2024-12-09T16:32:19,382 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-12-09T16:32:19,382 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T16:32:19,382 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/b4c50f5f724549ee816f5caa39a23d64 because midkey is the same as first or last row 2024-12-09T16:32:19,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1240e0f513578afb8fd45101b7305c2b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T16:32:19,382 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T16:32:19,382 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T16:32:19,384 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T16:32:19,384 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HStore(1541): 1240e0f513578afb8fd45101b7305c2b/info is initiating minor compaction (all files) 2024-12-09T16:32:19,385 INFO [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1240e0f513578afb8fd45101b7305c2b/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. 
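The ExploringCompactionPolicy record above reports selecting 3 files of total size 30048 after considering 1 permutation. The selection rests on HBase's compaction ratio check, controlled by hbase.hstore.compaction.ratio. The sketch below is an illustrative toy version of that check, not HBase's actual implementation; the 1.2 default and the name filesInRatio are assumptions made here for clarity.

    import java.util.List;

    // Toy sketch of the "in ratio" test a compaction selector can apply to a
    // candidate file list; the real ExploringCompactionPolicy explores many
    // permutations and handles stuck stores.
    public class CompactionRatioSketch {
        // Assumed default of hbase.hstore.compaction.ratio.
        static final double COMPACTION_RATIO = 1.2;

        // A candidate is "in ratio" when no single file is larger than
        // ratio * (sum of the sizes of the other files in the candidate).
        static boolean filesInRatio(List<Long> sizes) {
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            for (long size : sizes) {
                if (size > COMPACTION_RATIO * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Example sizes only; prints true because every file is within
            // 1.2x the combined size of the others.
            System.out.println(filesInRatio(List.of(12_000L, 11_000L, 7_000L)));
        }
    }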
2024-12-09T16:32:19,385 INFO [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/b4c50f5f724549ee816f5caa39a23d64, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/48ab7d03bdab4a759c2e0aeb27f0bcad, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ceff3f6a252e43349e4ac5a3fe4e76e9] into tmpdir=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp, totalSize=29.3 K
2024-12-09T16:32:19,386 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] compactions.Compactor(225): Compacting b4c50f5f724549ee816f5caa39a23d64, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733761930330
2024-12-09T16:32:19,386 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] compactions.Compactor(225): Compacting 48ab7d03bdab4a759c2e0aeb27f0bcad, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733761937847
2024-12-09T16:32:19,387 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] compactions.Compactor(225): Compacting ceff3f6a252e43349e4ac5a3fe4e76e9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733761939293
2024-12-09T16:32:19,422 INFO [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1240e0f513578afb8fd45101b7305c2b#info#compaction#24 average throughput is 1.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T16:32:19,423 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/aa8691de4b7141ac99c656e321a6a7f1 is 1080, key is row0002/info:/1733761930330/Put/seqid=0
2024-12-09T16:32:19,427 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38075
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:19,427 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43296 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741884_1067 to mirror 127.0.0.1:38075 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:19,427 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:19,427 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43296 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T16:32:19,427 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741884_1067
2024-12-09T16:32:19,428 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43296 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43296 dst: /127.0.0.1:42103 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:19,429 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:19,431 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:19,431 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad.
2024-12-09T16:32:19,431 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741885_1068
2024-12-09T16:32:19,432 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]
2024-12-09T16:32:19,435 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40975
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
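The abandon/exclude cycle above is the DFS client tearing down and rebuilding its write pipeline each time a mirror refuses the connection. On a mini cluster with replication=2 and few live datanodes, the client-side replace-datanode-on-failure policy decides whether a write continues with the surviving nodes or aborts with "All datanodes [...] are bad". Below is a hedged sketch of relaxing that policy on the client Configuration; the dfs.client.block.write.replace-datanode-on-failure.* keys are documented HDFS client settings, but verify the values against your Hadoop version, and the probe path and port are taken from this log, not prescribed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.net.URI;

    // Sketch: relax pipeline-recovery behaviour for a tiny test cluster so a
    // single bad datanode does not abort a write outright.
    public class PipelinePolicySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Keep writing with the remaining datanodes instead of demanding a
            // replacement that a 2-3 node mini cluster may not have.
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
            // Or keep the DEFAULT policy but tolerate replacement failures:
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

            try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38821"), conf)) {
                fs.create(new Path("/tmp/pipeline-policy-probe")).close();
            }
        }
    }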
2024-12-09T16:32:19,435 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43298 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741886_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741886_1069 to mirror 127.0.0.1:40975 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:19,435 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
2024-12-09T16:32:19,435 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741886_1069
2024-12-09T16:32:19,435 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43298 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741886_1069] {}] datanode.BlockReceiver(316): Block 1073741886 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T16:32:19,436 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:43298 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741886_1069] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43298 dst: /127.0.0.1:42103 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:19,436 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:19,438 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:19,438 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]) is bad.
2024-12-09T16:32:19,438 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741887_1070
2024-12-09T16:32:19,439 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43671,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]
2024-12-09T16:32:19,441 WARN [IPC Server handler 2 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-12-09T16:32:19,441 WARN [IPC Server handler 2 on default port 38821 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-12-09T16:32:19,441 WARN [IPC Server handler 2 on default port 38821 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-12-09T16:32:19,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741888_1071 (size=18097)
2024-12-09T16:32:19,452 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
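The BlockPlacementPolicyDefault warning above says to enable DEBUG on two loggers to learn why replica placement failed. Since this run configures logging through Log4j2, the levels can also be raised programmatically; a minimal sketch using log4j-core's Configurator, assuming log4j-core is on the classpath of the process whose output you want.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    // Sketch: turn on the DEBUG output the namenode's warning asks for, so the
    // next "Failed to place enough replicas" explains which nodes were rejected.
    public class PlacementDebugSketch {
        public static void main(String[] args) {
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy",
                Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }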
2024-12-09T16:32:19,855 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/aa8691de4b7141ac99c656e321a6a7f1 as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/aa8691de4b7141ac99c656e321a6a7f1
2024-12-09T16:32:19,866 INFO [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1240e0f513578afb8fd45101b7305c2b/info of 1240e0f513578afb8fd45101b7305c2b into aa8691de4b7141ac99c656e321a6a7f1(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T16:32:19,866 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1240e0f513578afb8fd45101b7305c2b:
2024-12-09T16:32:19,866 INFO [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b., storeName=1240e0f513578afb8fd45101b7305c2b/info, priority=13, startTime=1733761939382; duration=0sec
2024-12-09T16:32:19,866 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-12-09T16:32:19,866 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:32:19,866 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/aa8691de4b7141ac99c656e321a6a7f1 because midkey is the same as first or last row
2024-12-09T16:32:19,867 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-12-09T16:32:19,867 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:32:19,867 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/aa8691de4b7141ac99c656e321a6a7f1 because midkey is the same as first or last row
2024-12-09T16:32:19,867 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K
2024-12-09T16:32:19,867 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:32:19,867 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/aa8691de4b7141ac99c656e321a6a7f1 because midkey is the same as first or last row
2024-12-09T16:32:19,867 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:32:19,867 DEBUG [RS:0;dfce6ce5b10c:41215-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1240e0f513578afb8fd45101b7305c2b:info
2024-12-09T16:32:19,883 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:20,190 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@77cf8950[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42103, datanodeUuid=d661179c-c250-402b-a520-ede73037293f, infoPort=40487, infoSecurePort=0, ipcPort=34039, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595):Failed to transfer BP-2100637919-172.17.0.2-1733761910595:blk_1073741873_1056 to 127.0.0.1:43671 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:20,190 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@61cde5f3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42103, datanodeUuid=d661179c-c250-402b-a520-ede73037293f, infoPort=40487, infoSecurePort=0, ipcPort=34039, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595):Failed to transfer BP-2100637919-172.17.0.2-1733761910595:blk_1073741868_1051 to 127.0.0.1:43671 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:20,400 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:20,400 WARN [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas.
2024-12-09T16:32:20,538 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:32:20,554 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:32:20,556 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:32:20,556 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:32:20,556 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T16:32:20,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c767c56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:32:20,557 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@721d4444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:32:20,692 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@74e983d1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/java.io.tmpdir/jetty-localhost-41029-hadoop-hdfs-3_4_1-tests_jar-_-any-11972501046212356416/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:32:20,692 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@48ed1e93{HTTP/1.1, (http/1.1)}{localhost:41029}
2024-12-09T16:32:20,692 INFO [Time-limited test {}] server.Server(415): Started @139858ms
2024-12-09T16:32:20,694 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T16:32:21,065 WARN [Thread-985 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec.
Assuming default value of -1
2024-12-09T16:32:21,077 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c53905c98f977a6 with lease ID 0x2522342f5d96fea: from storage DS-84167a42-ec4b-4e4c-a636-257392bdeaa0 node DatanodeRegistration(127.0.0.1:43507, datanodeUuid=07f6efee-23eb-4e00-b421-21c4606f42e4, infoPort=41391, infoSecurePort=0, ipcPort=35481, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T16:32:21,078 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8c53905c98f977a6 with lease ID 0x2522342f5d96fea: from storage DS-8ed61516-bb4e-45b8-af33-413bf2e5281e node DatanodeRegistration(127.0.0.1:43507, datanodeUuid=07f6efee-23eb-4e00-b421-21c4606f42e4, infoPort=41391, infoSecurePort=0, ipcPort=35481, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T16:32:21,453 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:21,884 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:22,190 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@61cde5f3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:42103, datanodeUuid=d661179c-c250-402b-a520-ede73037293f, infoPort=40487, infoSecurePort=0, ipcPort=34039, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595):Failed to transfer BP-2100637919-172.17.0.2-1733761910595:blk_1073741883_1066 to 127.0.0.1:38075 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:22,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741858_1041 (size=13591)
2024-12-09T16:32:22,400 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:22,409 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-09T16:32:23,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741888_1071 (size=18097)
2024-12-09T16:32:23,453 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:23,884 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:24,031 ERROR [FSHLog-0-hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData-prefix:dfce6ce5b10c,39507,1733761912458 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:24,032 WARN [FSHLog-0-hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData-prefix:dfce6ce5b10c,39507,1733761912458 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:24,032 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C39507%2C1733761912458:(num 1733761913658) roll requested
2024-12-09T16:32:24,032 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C39507%2C1733761912458.1733761944032
2024-12-09T16:32:24,036 WARN [Thread-1007 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38075
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
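Above, appendAndSync failing on the master's WAL makes AbstractWALRoller request a roll, after which a new writer is created on a fresh pipeline. A roll can also be requested through the public client API; below is a sketch using Admin.rollWALWriter, with connection settings left to hbase-site.xml. This is an illustrative use of the API, not what the roller thread itself calls internally.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch: ask each region server to roll its WAL, the same operation the
    // roller thread performs when appends start failing.
    public class WalRollSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                for (ServerName sn : admin.getRegionServers()) {
                    admin.rollWALWriter(sn); // triggers a WAL roll on that server
                }
            }
        }
    }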
2024-12-09T16:32:24,036 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1095561119_22 at /127.0.0.1:55572 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741889_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data4]'}, localName='127.0.0.1:43507', datanodeUuid='07f6efee-23eb-4e00-b421-21c4606f42e4', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741889_1072 to mirror 127.0.0.1:38075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T16:32:24,036 WARN [Thread-1007 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43507,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad. 2024-12-09T16:32:24,036 WARN [Thread-1007 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741889_1072 2024-12-09T16:32:24,036 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1095561119_22 at /127.0.0.1:55572 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741889_1072] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T16:32:24,036 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1095561119_22 at /127.0.0.1:55572 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741889_1072] {}] datanode.DataXceiver(331): 127.0.0.1:43507:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55572 dst: /127.0.0.1:43507 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T16:32:24,037 WARN [Thread-1007 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK] 2024-12-09T16:32:24,041 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:32:24,041 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:32:24,041 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:32:24,041 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:32:24,041 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:32:24,042 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761944032 2024-12-09T16:32:24,042 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:32:24,042 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T16:32:24,042 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:32:24,043 WARN [IPC Server handler 1 on default port 38821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741830_1014
2024-12-09T16:32:24,043 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40487:40487),(127.0.0.1/127.0.0.1:41391:41391)]
2024-12-09T16:32:24,043 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 is not closed yet, will try archiving it next time
2024-12-09T16:32:24,044 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 after 1ms
2024-12-09T16:32:24,401 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:25,885 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:26,401 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:27,885 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:28,045 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 after 4003ms
2024-12-09T16:32:28,401 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:29,885 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:30,402 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:31,092 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4c43f8a2 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2100637919-172.17.0.2-1733761910595:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:40975,null,null])
java.net.ConnectException: Call From dfce6ce5b10c/172.17.0.2 to localhost:33373 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?]
    ... 12 more
2024-12-09T16:32:31,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741833_1020 (size=455)
2024-12-09T16:32:31,336 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435 to hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/oldWALs/dfce6ce5b10c%2C41215%2C1733761912669.1733761914435
2024-12-09T16:32:31,337 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761936332 to hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/oldWALs/dfce6ce5b10c%2C41215%2C1733761912669.1733761936332
2024-12-09T16:32:31,886 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:32,402 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
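The repeating "All datanodes ... are bad. Aborting..." entries above come from the HDFS client's pipeline recovery giving up: every datanode left in the write pipeline is unreachable, and the replace-datanode-on-failure machinery has no healthy node to swap in. The knobs controlling that behavior are client-side configuration. A minimal sketch, assuming a small test cluster like this one; the values shown are illustrative, not what this test run actually used:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class PipelineFailurePolicy {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep datanode replacement enabled (the default)...
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // ...but let writes continue on the surviving nodes when no replacement
        // exists, instead of aborting the stream outright. Note this cannot help
        // once *all* pipeline nodes are dead, which is what the log shows here.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        // 38821 is the NameNode port visible in the surrounding log entries.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:38821"), conf)) {
            System.out.println("connected: " + fs.getUri());
        }
    }
}
```

In this run the pipeline had shrunk to the single datanode 127.0.0.1:40975, which was itself dead, so no policy setting could have avoided the abort.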
2024-12-09T16:32:33,886 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,072 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7d459011[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:43507, datanodeUuid=07f6efee-23eb-4e00-b421-21c4606f42e4, infoPort=41391, infoSecurePort=0, ipcPort=35481, storageInfo=lv=-57;cid=testClusterID;nsid=1268311500;c=1733761910595):Failed to transfer BP-2100637919-172.17.0.2-1733761910595:blk_1073741833_1020 to 127.0.0.1:37681 got
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:34,146 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C41215%2C1733761912669.1733761954145
2024-12-09T16:32:34,150 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,150 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad.
2024-12-09T16:32:34,150 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741891_1075
2024-12-09T16:32:34,151 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]
2024-12-09T16:32:34,153 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,154 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:34,154 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741892_1076
2024-12-09T16:32:34,155 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:34,157 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,160 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
2024-12-09T16:32:34,160 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741893_1077
2024-12-09T16:32:34,161 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:34,169 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,169 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,169 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,170 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,170 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,170 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761938361 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761954145
2024-12-09T16:32:34,171 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41391:41391),(127.0.0.1/127.0.0.1:40487:40487)]
2024-12-09T16:32:34,171 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.1733761938361 is not closed yet, will try archiving it next time
2024-12-09T16:32:34,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741878_1061 (size=12911)
2024-12-09T16:32:34,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41215 {}] regionserver.HRegion(8855): Flush requested on 1240e0f513578afb8fd45101b7305c2b
2024-12-09T16:32:34,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1240e0f513578afb8fd45101b7305c2b 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-12-09T16:32:34,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/86858192478b46888409d3fba8bfe8cc is 1080, key is row0013/info:/1733761954172/Put/seqid=0
2024-12-09T16:32:34,185 WARN [Thread-1023 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1079
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,186 WARN [Thread-1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741895_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:34,186 WARN [Thread-1023 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741895_1079
2024-12-09T16:32:34,187 WARN [Thread-1023 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:34,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741896_1080 (size=8190)
2024-12-09T16:32:34,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741896_1080 (size=8190)
2024-12-09T16:32:34,197 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/86858192478b46888409d3fba8bfe8cc
2024-12-09T16:32:34,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/86858192478b46888409d3fba8bfe8cc as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/86858192478b46888409d3fba8bfe8cc
2024-12-09T16:32:34,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/86858192478b46888409d3fba8bfe8cc, entries=3, sequenceid=66, filesize=8.0 K
2024-12-09T16:32:34,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 1240e0f513578afb8fd45101b7305c2b in 41ms, sequenceid=66, compaction requested=false
2024-12-09T16:32:34,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1240e0f513578afb8fd45101b7305c2b:
2024-12-09T16:32:34,218 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K
2024-12-09T16:32:34,218 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:32:34,218 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/aa8691de4b7141ac99c656e321a6a7f1 because midkey is the same as first or last row
2024-12-09T16:32:34,403 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,403 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled.
2024-12-09T16:32:34,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T16:32:34,406 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T16:32:34,406 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:32:34,406 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:32:34,407 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:32:34,407 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T16:32:34,407 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=489911513, stopped=false
2024-12-09T16:32:34,407 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
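For the split-policy decision a few entries back ("Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K", then "cannot split ... because midkey is the same as first or last row"), the size test is plain arithmetic: split when the summed store size exceeds the configured threshold, unless the store has no usable midkey. A schematic sketch of that check; the names are illustrative, not HBase's actual ConstantSizeRegionSplitPolicy code:

```java
/** Schematic constant-size split check (illustrative names, not HBase internals). */
public class SplitCheckSketch {
    static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
        // "Should split because region size is big enough"
        return sumStoreSizeBytes > sizeToCheckBytes;
    }

    public static void main(String[] args) {
        long sumSize = 26317;         // ~25.7 K, as logged
        long sizeToCheck = 16 * 1024; // the 16.0 K threshold from the log
        System.out.println(shouldSplit(sumSize, sizeToCheck)); // true
        // Even with a positive size check, the split is abandoned when the
        // store file's midkey equals its first or last row key: there is no
        // row at which to cut the region, hence the "cannot split" entry.
    }
}
```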
2024-12-09T16:32:34,407 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=dfce6ce5b10c,39507,1733761912458
2024-12-09T16:32:34,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34107-0x100bfc54d170002, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T16:32:34,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T16:32:34,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34107-0x100bfc54d170002, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:32:34,421 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T16:32:34,421 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:32:34,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:32:34,422 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T16:32:34,422 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34107-0x100bfc54d170002, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:32:34,422 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
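The ZKWatcher entries above show the shutdown signal propagating through ZooKeeper: deleting /hbase/running fires a NodeDeleted event at every watcher, and each server immediately re-arms a watch on the now-missing znode, since ZooKeeper watches are one-shot. A minimal stand-alone sketch of the same pattern with the plain ZooKeeper client; the quorum address is the one in the log, the rest is illustrative:

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatch {
    public static void main(String[] args) throws Exception {
        // 127.0.0.1:62591 is the quorum shown in the surrounding log entries.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62591", 30_000, event -> {});
        Watcher watch = new Watcher() {
            @Override
            public void process(WatchedEvent event) {
                if (event.getType() == Event.EventType.NodeDeleted
                        && "/hbase/running".equals(event.getPath())) {
                    System.out.println("cluster shutdown requested");
                }
                try {
                    // Watches are one-shot: re-arm, even on a node that no longer
                    // exists (exists() sets a watch either way, which is exactly the
                    // "Set watcher on znode that does not yet exist" entry above).
                    zk.exists("/hbase/running", this);
                } catch (Exception ignored) {
                }
            }
        };
        zk.exists("/hbase/running", watch); // arm the initial watch
        Thread.sleep(Long.MAX_VALUE);
    }
}
```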
2024-12-09T16:32:34,422 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:32:34,422 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:32:34,422 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:32:34,422 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:32:34,422 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'dfce6ce5b10c,41215,1733761912669' *****
2024-12-09T16:32:34,422 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T16:32:34,422 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'dfce6ce5b10c,34107,1733761915656' *****
2024-12-09T16:32:34,422 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T16:32:34,423 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T16:32:34,423 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T16:32:34,423 INFO [RS:1;dfce6ce5b10c:34107 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T16:32:34,423 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T16:32:34,423 INFO [RS:1;dfce6ce5b10c:34107 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T16:32:34,423 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer(959): stopping server dfce6ce5b10c,34107,1733761915656
2024-12-09T16:32:34,423 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T16:32:34,423 INFO [RS:0;dfce6ce5b10c:41215 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T16:32:34,423 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T16:32:34,423 INFO [RS:0;dfce6ce5b10c:41215 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T16:32:34,424 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(3091): Received CLOSE for 1240e0f513578afb8fd45101b7305c2b
2024-12-09T16:32:34,424 INFO [RS:1;dfce6ce5b10c:34107 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;dfce6ce5b10c:34107.
2024-12-09T16:32:34,424 DEBUG [RS:1;dfce6ce5b10c:34107 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:32:34,424 DEBUG [RS:1;dfce6ce5b10c:34107 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:32:34,424 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer(976): stopping server dfce6ce5b10c,34107,1733761915656; all regions closed.
2024-12-09T16:32:34,424 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(959): stopping server dfce6ce5b10c,41215,1733761912669
2024-12-09T16:32:34,424 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T16:32:34,424 INFO [RS:0;dfce6ce5b10c:41215 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;dfce6ce5b10c:41215.
2024-12-09T16:32:34,424 DEBUG [RS:0;dfce6ce5b10c:41215 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:32:34,424 DEBUG [RS:0;dfce6ce5b10c:41215 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:32:34,425 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T16:32:34,425 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T16:32:34,425 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
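The "Waiting for Split Thread / Large Compaction Thread / Small Compaction Thread to finish..." entries above are the usual drain-then-await idiom for stopping worker pools. A generic sketch of that idiom with java.util.concurrent executors; this illustrates the pattern only, not HBase's CompactSplit implementation:

```java
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class GracefulPoolShutdown {
    public static void main(String[] args) throws InterruptedException {
        Map<String, ExecutorService> pools = Map.of(
            "Split Thread", Executors.newSingleThreadExecutor(),
            "Large Compaction Thread", Executors.newSingleThreadExecutor(),
            "Small Compaction Thread", Executors.newSingleThreadExecutor());

        for (Map.Entry<String, ExecutorService> e : pools.entrySet()) {
            ExecutorService pool = e.getValue();
            pool.shutdown(); // stop accepting new work; let queued tasks drain
            System.out.println("Waiting for " + e.getKey() + " to finish...");
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // interrupt stragglers after the grace period
            }
        }
    }
}
```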
2024-12-09T16:32:34,425 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,425 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T16:32:34,425 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,425 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,425 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-12-09T16:32:34,425 DEBUG [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(1325): Online Regions={1240e0f513578afb8fd45101b7305c2b=TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b., 1588230740=hbase:meta,,1.1588230740}
2024-12-09T16:32:34,425 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,425 DEBUG [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(1351): Waiting on 1240e0f513578afb8fd45101b7305c2b, 1588230740
2024-12-09T16:32:34,425 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,425 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T16:32:34,425 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1240e0f513578afb8fd45101b7305c2b, disabling compactions & flushes
2024-12-09T16:32:34,425 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T16:32:34,425 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.
2024-12-09T16:32:34,425 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T16:32:34,425 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.
2024-12-09T16:32:34,425 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T16:32:34,425 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. after waiting 0 ms
2024-12-09T16:32:34,425 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T16:32:34,425 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.
2024-12-09T16:32:34,426 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 1240e0f513578afb8fd45101b7305c2b 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB
2024-12-09T16:32:34,426 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB
2024-12-09T16:32:34,426 ERROR [FSHLog-0-hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52-prefix:dfce6ce5b10c,41215,1733761912669.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,426 WARN [FSHLog-0-hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52-prefix:dfce6ce5b10c,41215,1733761912669.meta {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,426 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C41215%2C1733761912669.meta:.meta(num 1733761915111) roll requested
2024-12-09T16:32:34,427 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761954427.meta
2024-12-09T16:32:34,427 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,427 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,428 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:32:34,428 WARN [IPC Server handler 2 on default port 38821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 has not been closed. Lease recovery is in progress. RecoveryId = 1081 for block blk_1073741837_1013
2024-12-09T16:32:34,429 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 after 1ms
2024-12-09T16:32:34,436 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,436 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
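The RecoverLeaseFSUtils entries above ("Recover lease on dfs file ...", then "Failed to recover lease, attempt=0 ... after 1ms", with attempt=1 arriving roughly four seconds later earlier in this log) poll lease recovery until the NameNode reports the file closed. A simplified sketch of that loop built on DistributedFileSystem.recoverLease, the public HDFS API that HBase's utility wraps; the timeout, pause, and WAL path here are illustrative:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    /**
     * Polls recoverLease() until the file is closed or the deadline passes.
     * recoverLease() is non-blocking: it starts/continues recovery on the
     * NameNode and returns true only once the lease has been released.
     */
    static boolean recoverLease(DistributedFileSystem dfs, Path wal,
                                long timeoutMs, long pauseMs) throws Exception {
        long begin = System.currentTimeMillis();
        for (int attempt = 0; System.currentTimeMillis() - begin < timeoutMs; attempt++) {
            if (dfs.recoverLease(wal)) {
                return true; // file is closed; safe to read/split the WAL
            }
            System.out.printf("Failed to recover lease, attempt=%d on file=%s after %dms%n",
                attempt, wal, System.currentTimeMillis() - begin);
            Thread.sleep(pauseMs); // ~4s between attempts, as the log shows
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address from the log; the WAL path is a hypothetical example.
        try (DistributedFileSystem dfs = (DistributedFileSystem)
                FileSystem.get(URI.create("hdfs://localhost:38821"), conf)) {
            recoverLease(dfs, new Path("/wals/example.wal"), 900_000, 4_000);
        }
    }
}
```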
2024-12-09T16:32:34,436 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741897_1082
2024-12-09T16:32:34,437 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:34,448 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1083
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38075
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,448 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:39100 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741898_1083] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741898_1083 to mirror 127.0.0.1:38075
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:34,448 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:39100 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741898_1083] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-12-09T16:32:34,448 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741898_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:34,448 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741898_1083
2024-12-09T16:32:34,448 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:39100 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741898_1083] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39100 dst: /127.0.0.1:42103
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:34,449 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:34,451 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1084
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,452 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741899_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad.
2024-12-09T16:32:34,452 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741899_1084
2024-12-09T16:32:34,453 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]
2024-12-09T16:32:34,453 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/a43569fede7140348f03ffc6be806941 is 1080, key is row0015/info:/1733761954178/Put/seqid=0
2024-12-09T16:32:34,461 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1086
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37681
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,462 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741901_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK], DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]) is bad.
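The Abandoning / Excluding datanode cycle running through these entries follows a general exclude-and-retry shape: each failed pipeline attempt abandons the allocated block, adds the unreachable node to an exclusion set, and requests a fresh allocation that avoids it, until no candidates remain. A schematic, self-contained sketch of that shape; all names here are illustrative, not the DFSClient's internals:

```java
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Function;

public class ExcludeAndRetrySketch {
    /** Tries to build a pipeline, excluding nodes that fail, up to maxRetries. */
    static List<String> setupPipeline(Function<Set<String>, List<String>> allocate,
                                      Function<String, Boolean> connect,
                                      int maxRetries) {
        Set<String> excluded = new HashSet<>();
        for (int i = 0; i < maxRetries; i++) {
            List<String> pipeline = allocate.apply(excluded); // fresh block, minus excluded nodes
            String firstBad = null;
            for (String node : pipeline) {
                if (!connect.apply(node)) { firstBad = node; break; }
            }
            if (firstBad == null) {
                return pipeline; // every node in the pipeline is reachable
            }
            // Mirrors the log: abandon this block, exclude the bad node, retry.
            System.out.println("Abandoning block; excluding datanode " + firstBad);
            excluded.add(firstBad);
        }
        throw new IllegalStateException("out of datanodes to try");
    }

    public static void main(String[] args) {
        // Toy allocator over the three datanode addresses seen in the log;
        // here only one of them accepts connections.
        List<String> nodes = List.of("127.0.0.1:37681", "127.0.0.1:38075", "127.0.0.1:42103");
        List<String> pipeline = setupPipeline(
            excl -> nodes.stream().filter(n -> !excl.contains(n)).toList(),
            node -> node.equals("127.0.0.1:42103"),
            5);
        System.out.println("pipeline: " + pipeline);
    }
}
```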
2024-12-09T16:32:34,462 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741901_1086
2024-12-09T16:32:34,461 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:39124 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741901_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10]'}, localName='127.0.0.1:42103', datanodeUuid='d661179c-c250-402b-a520-ede73037293f', xmitsInProgress=0}:Exception transferring block BP-2100637919-172.17.0.2-1733761910595:blk_1073741901_1086 to mirror 127.0.0.1:37681
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:34,462 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:39124 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741901_1086] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-12-09T16:32:34,463 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]
2024-12-09T16:32:34,463 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1250825291_22 at /127.0.0.1:39124 [Receiving block BP-2100637919-172.17.0.2-1733761910595:blk_1073741901_1086] {}] datanode.DataXceiver(331): 127.0.0.1:42103:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39124 dst: /127.0.0.1:42103
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:32:34,465 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1087
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,465 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741902_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:42103,DS-49a247fc-21f2-41d5-8647-53ea95b3ffa2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:34,465 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741902_1087
2024-12-09T16:32:34,466 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:34,475 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,476 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,476 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,476 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,476 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,476 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761954427.meta
2024-12-09T16:32:34,481 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,481 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,482 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:32:34,483 WARN [IPC Server handler 0 on default port 38821 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta has not been closed. Lease recovery is in progress. RecoveryId = 1089 for block blk_1073741834_1010
2024-12-09T16:32:34,484 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta after 2ms
2024-12-09T16:32:34,493 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40487:40487),(127.0.0.1/127.0.0.1:41391:41391)]
2024-12-09T16:32:34,493 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta is not closed yet, will try archiving it next time
2024-12-09T16:32:34,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741903_1088 (size=14660)
2024-12-09T16:32:34,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741903_1088 (size=14660)
2024-12-09T16:32:34,498 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/a43569fede7140348f03ffc6be806941
2024-12-09T16:32:34,544 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/.tmp/info/a43569fede7140348f03ffc6be806941 as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/a43569fede7140348f03ffc6be806941
2024-12-09T16:32:34,554 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/a43569fede7140348f03ffc6be806941, entries=9, sequenceid=78, filesize=14.3 K
2024-12-09T16:32:34,555 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/.tmp/info/7f3a3c55fb42466fb380bd07951103df is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b./info:regioninfo/1733761916656/Put/seqid=0
2024-12-09T16:32:34,556 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 1240e0f513578afb8fd45101b7305c2b in 131ms, sequenceid=78, compaction requested=true
2024-12-09T16:32:34,558 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/8e2537b74a4042cab87e48527662577a, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ba478df70622483f9b6ce6eec60aaa5d, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/b4c50f5f724549ee816f5caa39a23d64, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/d6225cd302a1498799afb900fb8f985f, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/48ab7d03bdab4a759c2e0aeb27f0bcad, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ceff3f6a252e43349e4ac5a3fe4e76e9] to archive
2024-12-09T16:32:34,560 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-09T16:32:34,563 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/8e2537b74a4042cab87e48527662577a to hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/8e2537b74a4042cab87e48527662577a
2024-12-09T16:32:34,565 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ba478df70622483f9b6ce6eec60aaa5d to hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ba478df70622483f9b6ce6eec60aaa5d
2024-12-09T16:32:34,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741904_1090 (size=7089)
2024-12-09T16:32:34,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741904_1090 (size=7089)
2024-12-09T16:32:34,567 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/.tmp/info/7f3a3c55fb42466fb380bd07951103df
2024-12-09T16:32:34,568 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/b4c50f5f724549ee816f5caa39a23d64 to hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/b4c50f5f724549ee816f5caa39a23d64
2024-12-09T16:32:34,570 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/d6225cd302a1498799afb900fb8f985f to hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/d6225cd302a1498799afb900fb8f985f
2024-12-09T16:32:34,585 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/48ab7d03bdab4a759c2e0aeb27f0bcad to hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/48ab7d03bdab4a759c2e0aeb27f0bcad
2024-12-09T16:32:34,587 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ceff3f6a252e43349e4ac5a3fe4e76e9 to hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/info/ceff3f6a252e43349e4ac5a3fe4e76e9
2024-12-09T16:32:34,588 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=dfce6ce5b10c:39507 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-12-09T16:32:34,588 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [8e2537b74a4042cab87e48527662577a=10347, ba478df70622483f9b6ce6eec60aaa5d=12506, b4c50f5f724549ee816f5caa39a23d64=17994, d6225cd302a1498799afb900fb8f985f=6027, 48ab7d03bdab4a759c2e0aeb27f0bcad=6027, ceff3f6a252e43349e4ac5a3fe4e76e9=6027]
2024-12-09T16:32:34,595 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/default/TestLogRolling-testLogRollOnDatanodeDeath/1240e0f513578afb8fd45101b7305c2b/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1
2024-12-09T16:32:34,596 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.
2024-12-09T16:32:34,597 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1240e0f513578afb8fd45101b7305c2b: Waiting for close lock at 1733761954425Running coprocessor pre-close hooks at 1733761954425Disabling compacts and flushes for region at 1733761954425Disabling writes for close at 1733761954425Obtaining lock to block concurrent updates at 1733761954426 (+1 ms)Preparing flush snapshotting stores in 1240e0f513578afb8fd45101b7305c2b at 1733761954426Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1733761954426Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b. at 1733761954427 (+1 ms)Flushing 1240e0f513578afb8fd45101b7305c2b/info: creating writer at 1733761954427Flushing 1240e0f513578afb8fd45101b7305c2b/info: appending metadata at 1733761954452 (+25 ms)Flushing 1240e0f513578afb8fd45101b7305c2b/info: closing flushed file at 1733761954452Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@106ef0f2: reopening flushed file at 1733761954527 (+75 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 1240e0f513578afb8fd45101b7305c2b in 131ms, sequenceid=78, compaction requested=true at 1733761954556 (+29 ms)Writing region close event to WAL at 1733761954589 (+33 ms)Running coprocessor post-close hooks at 1733761954596 (+7 ms)Closed at 1733761954596
2024-12-09T16:32:34,597 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733761915943.1240e0f513578afb8fd45101b7305c2b.
2024-12-09T16:32:34,609 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/.tmp/ns/c5fc29d7dbab478b9fdd9a7388384825 is 43, key is default/ns:d/1733761915496/Put/seqid=0
2024-12-09T16:32:34,617 WARN [Thread-1051 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1091
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,618 WARN [Thread-1051 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741905_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK], DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
2024-12-09T16:32:34,618 WARN [Thread-1051 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741905_1091
2024-12-09T16:32:34,619 WARN [Thread-1051 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:34,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741906_1092 (size=5153)
2024-12-09T16:32:34,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741906_1092 (size=5153)
2024-12-09T16:32:34,624 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/.tmp/ns/c5fc29d7dbab478b9fdd9a7388384825
2024-12-09T16:32:34,625 DEBUG [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T16:32:34,651 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/.tmp/table/c7a4039b9ba24253925233a23973bb74 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733761916780/Put/seqid=0
2024-12-09T16:32:34,653 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1093
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,653 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741907_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK], DatanodeInfoWithStorage[127.0.0.1:37681,DS-393c19d5-6fdc-4f17-afd7-6dcf9c0b6868,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]) is bad.
2024-12-09T16:32:34,653 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741907_1093
2024-12-09T16:32:34,654 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38075,DS-63692479-bf45-400c-9f25-6a83d70cd7aa,DISK]
2024-12-09T16:32:34,656 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741908_1094
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:32:34,656 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2100637919-172.17.0.2-1733761910595:blk_1073741908_1094 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK], DatanodeInfoWithStorage[127.0.0.1:43507,DS-84167a42-ec4b-4e4c-a636-257392bdeaa0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]) is bad.
2024-12-09T16:32:34,656 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-2100637919-172.17.0.2-1733761910595:blk_1073741908_1094
2024-12-09T16:32:34,657 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40975,DS-537b82e7-990e-4f2a-b129-a6cbd341b343,DISK]
2024-12-09T16:32:34,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741909_1095 (size=5424)
2024-12-09T16:32:34,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741909_1095 (size=5424)
2024-12-09T16:32:34,672 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/.tmp/table/c7a4039b9ba24253925233a23973bb74
2024-12-09T16:32:34,682 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/.tmp/info/7f3a3c55fb42466fb380bd07951103df as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/info/7f3a3c55fb42466fb380bd07951103df
2024-12-09T16:32:34,693 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/info/7f3a3c55fb42466fb380bd07951103df, entries=10, sequenceid=11, filesize=6.9 K
2024-12-09T16:32:34,695 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/.tmp/ns/c5fc29d7dbab478b9fdd9a7388384825 as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/ns/c5fc29d7dbab478b9fdd9a7388384825
2024-12-09T16:32:34,710 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/ns/c5fc29d7dbab478b9fdd9a7388384825, entries=2, sequenceid=11, filesize=5.0 K
2024-12-09T16:32:34,712 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/.tmp/table/c7a4039b9ba24253925233a23973bb74 as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/table/c7a4039b9ba24253925233a23973bb74
2024-12-09T16:32:34,722 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/table/c7a4039b9ba24253925233a23973bb74, entries=2, sequenceid=11, filesize=5.3 K
2024-12-09T16:32:34,723 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 298ms, sequenceid=11, compaction requested=false
2024-12-09T16:32:34,732 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-09T16:32:34,733 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:32:34,733 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T16:32:34,734 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733761954425Running coprocessor pre-close hooks at 1733761954425Disabling compacts and flushes for region at 1733761954425Disabling writes for close at 1733761954425Obtaining lock to block concurrent updates at 1733761954426 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733761954426Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733761954427 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733761954494 (+67 ms)Flushing 1588230740/info: creating writer at 1733761954494Flushing 1588230740/info: appending metadata at 1733761954555 (+61 ms)Flushing 1588230740/info: closing flushed file at 1733761954555Flushing 1588230740/ns: creating writer at 1733761954585 (+30 ms)Flushing 1588230740/ns: appending metadata at 1733761954608 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1733761954608Flushing 1588230740/table: creating writer at 1733761954632 (+24 ms)Flushing 1588230740/table: appending metadata at 1733761954650 (+18 ms)Flushing 1588230740/table: closing flushed file at 1733761954650Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f1b89a: reopening flushed file at 1733761954680 (+30 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d2e9a7d: reopening flushed file at 1733761954693 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48db54fe: reopening flushed file at 1733761954711 (+18 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 298ms, sequenceid=11, compaction requested=false at 1733761954723 (+12 ms)Writing region close event to WAL at 1733761954727 (+4 ms)Running coprocessor post-close hooks at 1733761954733 (+6 ms)Closed at 1733761954733
2024-12-09T16:32:34,734 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T16:32:34,826 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(976): stopping server dfce6ce5b10c,41215,1733761912669; all regions closed.
2024-12-09T16:32:34,826 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,826 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,827 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,827 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,827 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:34,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741900_1085 (size=825)
2024-12-09T16:32:34,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741900_1085 (size=825)
2024-12-09T16:32:34,884 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T16:32:34,884 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T16:32:35,282 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T16:32:35,282 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T16:32:35,884 INFO [regionserver/dfce6ce5b10c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:32:36,194 INFO [master/dfce6ce5b10c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-09T16:32:36,195 INFO [master/dfce6ce5b10c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-09T16:32:36,311 INFO [regionserver/dfce6ce5b10c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:32:37,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741835_1011 (size=393)
2024-12-09T16:32:37,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741831_1007 (size=1321)
2024-12-09T16:32:37,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741878_1061 (size=12911)
2024-12-09T16:32:38,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741829_1005 (size=34)
2024-12-09T16:32:38,430 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 after 4002ms
2024-12-09T16:32:38,485 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta after 4003ms
2024-12-09T16:32:39,426 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-09T16:32:39,429 DEBUG [RS:1;dfce6ce5b10c:34107 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/oldWALs
2024-12-09T16:32:39,429 INFO [RS:1;dfce6ce5b10c:34107 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C34107%2C1733761915656:(num 1733761916051)
2024-12-09T16:32:39,429 DEBUG [RS:1;dfce6ce5b10c:34107 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:32:39,429 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:32:39,429 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:32:39,429 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.ChoreService(370): Chore service for: regionserver/dfce6ce5b10c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T16:32:39,429 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T16:32:39,429 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:32:39,429 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T16:32:39,429 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T16:32:39,429 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:32:39,429 INFO [RS:1;dfce6ce5b10c:34107 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34107
2024-12-09T16:32:39,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87)
    at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77)
    at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:39,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34107-0x100bfc54d170002, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/dfce6ce5b10c,34107,1733761915656
2024-12-09T16:32:39,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T16:32:39,471 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:32:39,471 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [dfce6ce5b10c,34107,1733761915656]
2024-12-09T16:32:39,487 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/dfce6ce5b10c,34107,1733761915656 already deleted, retry=false
2024-12-09T16:32:39,487 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; dfce6ce5b10c,34107,1733761915656 expired; onlineServers=1
2024-12-09T16:32:39,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34107-0x100bfc54d170002, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:32:39,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34107-0x100bfc54d170002, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:32:39,579 INFO [RS:1;dfce6ce5b10c:34107 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:32:39,579 INFO [RS:1;dfce6ce5b10c:34107 {}] regionserver.HRegionServer(1031): Exiting; stopping=dfce6ce5b10c,34107,1733761915656; zookeeper connection closed.
2024-12-09T16:32:39,580 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4cae58b1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4cae58b1
2024-12-09T16:32:39,598 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:39,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:39,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:39,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:39,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:39,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:39,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:39,633 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:39,834 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-09T16:32:39,838 DEBUG [RS:0;dfce6ce5b10c:41215 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/oldWALs
2024-12-09T16:32:39,838 INFO [RS:0;dfce6ce5b10c:41215 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C41215%2C1733761912669.meta:.meta(num 1733761954427)
2024-12-09T16:32:39,852 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:39,853 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:39,853 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:39,853 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:39,853 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:39,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741894_1078 (size=14682)
2024-12-09T16:32:39,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741894_1078 (size=14682)
2024-12-09T16:32:39,864 DEBUG [RS:0;dfce6ce5b10c:41215 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/oldWALs
2024-12-09T16:32:39,864 INFO [RS:0;dfce6ce5b10c:41215 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C41215%2C1733761912669:(num 1733761954145)
2024-12-09T16:32:39,864 DEBUG [RS:0;dfce6ce5b10c:41215 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:32:39,864 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:32:39,865 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:32:39,865 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.ChoreService(370): Chore service for: regionserver/dfce6ce5b10c:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-09T16:32:39,865 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:32:39,865 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:32:39,866 INFO [RS:0;dfce6ce5b10c:41215 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41215
2024-12-09T16:32:39,871 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/dfce6ce5b10c,41215,1733761912669
2024-12-09T16:32:39,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T16:32:39,871 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:32:39,879 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [dfce6ce5b10c,41215,1733761912669]
2024-12-09T16:32:39,887 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/dfce6ce5b10c,41215,1733761912669 already deleted, retry=false
2024-12-09T16:32:39,887 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; dfce6ce5b10c,41215,1733761912669 expired; onlineServers=0
2024-12-09T16:32:39,887 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'dfce6ce5b10c,39507,1733761912458' *****
2024-12-09T16:32:39,887 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T16:32:39,888 INFO [M:0;dfce6ce5b10c:39507 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T16:32:39,888 INFO [M:0;dfce6ce5b10c:39507 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:32:39,888 DEBUG [M:0;dfce6ce5b10c:39507 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T16:32:39,888 DEBUG [M:0;dfce6ce5b10c:39507 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T16:32:39,888 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T16:32:39,888 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761914053 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761914053,5,FailOnTimeoutGroup]
2024-12-09T16:32:39,888 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761914058 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761914058,5,FailOnTimeoutGroup]
2024-12-09T16:32:39,888 INFO [M:0;dfce6ce5b10c:39507 {}] hbase.ChoreService(370): Chore service for: master/dfce6ce5b10c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T16:32:39,888 INFO [M:0;dfce6ce5b10c:39507 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:32:39,888 DEBUG [M:0;dfce6ce5b10c:39507 {}] master.HMaster(1795): Stopping service threads
2024-12-09T16:32:39,888 INFO [M:0;dfce6ce5b10c:39507 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T16:32:39,888 INFO [M:0;dfce6ce5b10c:39507 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T16:32:39,889 INFO [M:0;dfce6ce5b10c:39507 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T16:32:39,889 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T16:32:39,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T16:32:39,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:32:39,905 DEBUG [M:0;dfce6ce5b10c:39507 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false
2024-12-09T16:32:39,905 DEBUG [M:0;dfce6ce5b10c:39507 {}] master.ActiveMasterManager(353): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master
2024-12-09T16:32:39,907 INFO [M:0;dfce6ce5b10c:39507 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/.lastflushedseqids
2024-12-09T16:32:39,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741910_1096 (size=130)
2024-12-09T16:32:39,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741910_1096 (size=130)
2024-12-09T16:32:39,927 INFO [M:0;dfce6ce5b10c:39507 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T16:32:39,927 INFO [M:0;dfce6ce5b10c:39507 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T16:32:39,927 DEBUG [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T16:32:39,927 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:32:39,927 DEBUG [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:32:39,927 DEBUG [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T16:32:39,927 DEBUG [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:32:39,927 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB
2024-12-09T16:32:39,948 DEBUG [M:0;dfce6ce5b10c:39507 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/989d3a68e47743fc9d758fc521d1b173 is 82, key is hbase:meta,,1/info:regioninfo/1733761915369/Put/seqid=0
2024-12-09T16:32:39,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741911_1097 (size=5672)
2024-12-09T16:32:39,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741911_1097 (size=5672)
2024-12-09T16:32:39,967 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/989d3a68e47743fc9d758fc521d1b173
2024-12-09T16:32:39,979 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:32:39,979 INFO [RS:0;dfce6ce5b10c:41215 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:32:39,979 DEBUG [pool-302-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41215-0x100bfc54d170001, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:32:39,979 INFO [RS:0;dfce6ce5b10c:41215 {}] regionserver.HRegionServer(1031): Exiting; stopping=dfce6ce5b10c,41215,1733761912669; zookeeper connection closed.
2024-12-09T16:32:39,980 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@569361b2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@569361b2
2024-12-09T16:32:39,980 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete
2024-12-09T16:32:40,008 DEBUG [M:0;dfce6ce5b10c:39507 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f6e53f181b084a5c8e64a606560f5314 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733761916803/Put/seqid=0
2024-12-09T16:32:40,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741912_1098 (size=6254)
2024-12-09T16:32:40,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741912_1098 (size=6254)
2024-12-09T16:32:40,018 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f6e53f181b084a5c8e64a606560f5314
2024-12-09T16:32:40,055 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f6e53f181b084a5c8e64a606560f5314
2024-12-09T16:32:40,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741827_1003 (size=196)
2024-12-09T16:32:40,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741825_1001 (size=7)
2024-12-09T16:32:40,081 DEBUG [M:0;dfce6ce5b10c:39507 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/edd3c93413d24f4db011ddd6dd8ff125 is 69, key is dfce6ce5b10c,34107,1733761915656/rs:state/1733761915816/Put/seqid=0
2024-12-09T16:32:40,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741913_1099 (size=5224)
2024-12-09T16:32:40,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741913_1099 (size=5224)
2024-12-09T16:32:40,093 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/edd3c93413d24f4db011ddd6dd8ff125
2024-12-09T16:32:40,135 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T16:32:40,141 DEBUG [M:0;dfce6ce5b10c:39507 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c3b344ea6f864e95ae0209304c25ef47 is 52, key is load_balancer_on/state:d/1733761915619/Put/seqid=0
2024-12-09T16:32:40,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:40,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:40,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:40,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:40,154 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:40,155 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:40,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:40,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741914_1100 (size=5056)
2024-12-09T16:32:40,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741914_1100 (size=5056)
2024-12-09T16:32:40,165 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c3b344ea6f864e95ae0209304c25ef47
2024-12-09T16:32:40,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:40,178 DEBUG [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/989d3a68e47743fc9d758fc521d1b173 as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/989d3a68e47743fc9d758fc521d1b173
2024-12-09T16:32:40,181 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath
2024-12-09T16:32:40,181 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:32:40,181 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T16:32:40,189 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/989d3a68e47743fc9d758fc521d1b173, entries=8, sequenceid=60, filesize=5.5 K
2024-12-09T16:32:40,190 DEBUG [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f6e53f181b084a5c8e64a606560f5314 as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f6e53f181b084a5c8e64a606560f5314
2024-12-09T16:32:40,220 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f6e53f181b084a5c8e64a606560f5314
2024-12-09T16:32:40,221 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f6e53f181b084a5c8e64a606560f5314, entries=6, sequenceid=60, filesize=6.1 K
2024-12-09T16:32:40,222 DEBUG [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/edd3c93413d24f4db011ddd6dd8ff125 as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/edd3c93413d24f4db011ddd6dd8ff125
2024-12-09T16:32:40,230 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/edd3c93413d24f4db011ddd6dd8ff125, entries=2, sequenceid=60, filesize=5.1 K
2024-12-09T16:32:40,231 DEBUG [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/c3b344ea6f864e95ae0209304c25ef47 as hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c3b344ea6f864e95ae0209304c25ef47
2024-12-09T16:32:40,245 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/c3b344ea6f864e95ae0209304c25ef47, entries=1, sequenceid=60, filesize=4.9 K
2024-12-09T16:32:40,262 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 335ms, sequenceid=60, compaction requested=false
2024-12-09T16:32:40,311 INFO [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:32:40,311 DEBUG [M:0;dfce6ce5b10c:39507 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733761959927Disabling compacts and flushes for region at 1733761959927Disabling writes for close at 1733761959927Obtaining lock to block concurrent updates at 1733761959927Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733761959927Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1733761959928 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733761959929 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733761959929Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733761959948 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733761959948Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733761959975 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733761960007 (+32 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733761960007Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733761960055 (+48 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733761960080 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733761960080Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733761960105 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733761960140 (+35 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733761960140Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39e21114: reopening flushed file at 1733761960176 (+36 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4f1fd75d: reopening flushed file at 1733761960189 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@de62b36: reopening flushed file at 1733761960221 (+32 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fd12a28: reopening flushed file at 1733761960230 (+9 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 335ms, sequenceid=60, compaction requested=false at 1733761960262 (+32 ms)Writing region close event to WAL at 1733761960311 (+49 ms)Closed at 1733761960311
2024-12-09T16:32:40,313 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:40,313 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:40,313 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:40,314 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:40,314 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:32:40,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43507 is added to blk_1073741890_1073 (size=1045)
2024-12-09T16:32:40,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741890_1073 (size=1045)
2024-12-09T16:32:40,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:40,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:41,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741836_1012 (size=76)
2024-12-09T16:32:41,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741832_1008 (size=32)
2024-12-09T16:32:41,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:41,488 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:42,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:42,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:43,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741826_1002 (size=42)
2024-12-09T16:32:43,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42103 is added to blk_1073741828_1004 (size=1189)
2024-12-09T16:32:43,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:43,490 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:44,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:44,493 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:45,080 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7fb97650 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2100637919-172.17.0.2-1733761910595:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:40975,null,null])
java.net.ConnectException: Call From dfce6ce5b10c/172.17.0.2 to localhost:33373 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?]
    ... 12 more
2024-12-09T16:32:45,322 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-12-09T16:32:45,322 INFO [M:0;dfce6ce5b10c:39507 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T16:32:45,322 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:32:45,322 INFO [M:0;dfce6ce5b10c:39507 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39507 2024-12-09T16:32:45,323 INFO [M:0;dfce6ce5b10c:39507 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T16:32:45,437 INFO [M:0;dfce6ce5b10c:39507 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T16:32:45,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T16:32:45,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39507-0x100bfc54d170000, quorum=127.0.0.1:62591, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T16:32:45,441 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@74e983d1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:32:45,441 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@48ed1e93{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T16:32:45,441 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T16:32:45,442 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@721d4444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T16:32:45,442 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c767c56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,STOPPED} 2024-12-09T16:32:45,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:45,446 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7fb97650 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2100637919-172.17.0.2-1733761910595:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:40975,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:33373 , LocalHost:localPort dfce6ce5b10c/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS)
    at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?]
    ... 12 more
Caused by: java.lang.InterruptedException: sleep interrupted
    at java.lang.Thread.sleep(Native Method) ~[?:?]
    at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?]
    ... 12 more
2024-12-09T16:32:45,448 WARN [BP-2100637919-172.17.0.2-1733761910595 heartbeating to localhost/127.0.0.1:38821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2100637919-172.17.0.2-1733761910595 (Datanode Uuid 07f6efee-23eb-4e00-b421-21c4606f42e4) service to localhost/127.0.0.1:38821
2024-12-09T16:32:45,449 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data4/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:32:45,450 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:32:45,454 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data3/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:32:45,459 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@70fc09c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:32:45,460 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10ee7da1{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:32:45,460 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:32:45,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30b0250{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:32:45,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@630c7f7e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,STOPPED}
2024-12-09T16:32:45,466 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
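The two WARN traces above show a recurring teardown pattern. HBase's RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed reflectively, so once the minicluster's DFSClient has been closed the probe surfaces as an InvocationTargetException wrapping IOException("Filesystem closed"); the block-recovery failure is the same teardown story seen from the datanode side, with the shutdown interrupt unwinding Thread.sleep inside the RetryUpToMaximumCountWithFixedSleep connection-retry loop. A minimal sketch of the reflective-probe pattern these traces exhibit, assuming only a Hadoop FileSystem handle; the helper name probeFileClosed is illustrative, not the actual HBase API:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class LeaseProbeSketch {
    // Call isFileClosed(Path) by reflection so the code also runs against
    // FileSystem implementations that lack the method. If the filesystem was
    // already shut down, the reflective call throws InvocationTargetException
    // whose cause is IOException("Filesystem closed") -- exactly the pairing
    // logged above -- and the caller treats the probe as inconclusive.
    static boolean probeFileClosed(FileSystem fs, Path path) {
        try {
            Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
            return (Boolean) isFileClosed.invoke(fs, path);
        } catch (NoSuchMethodException e) {
            return false; // this FileSystem has no isFileClosed at all
        } catch (IllegalAccessException | InvocationTargetException e) {
            // e.getCause() carries the real failure, e.g. "Filesystem closed";
            // log it and keep waiting for the lease instead of failing hard.
            return false;
        }
    }
}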
2024-12-09T16:32:45,466 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:32:45,469 WARN [BP-2100637919-172.17.0.2-1733761910595 heartbeating to localhost/127.0.0.1:38821 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:32:45,469 WARN [BP-2100637919-172.17.0.2-1733761910595 heartbeating to localhost/127.0.0.1:38821 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2100637919-172.17.0.2-1733761910595 (Datanode Uuid d661179c-c250-402b-a520-ede73037293f) service to localhost/127.0.0.1:38821
2024-12-09T16:32:45,470 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data9/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:32:45,470 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/cluster_e976c725-10f5-9573-b9ea-380eeec29855/data/data10/current/BP-2100637919-172.17.0.2-1733761910595 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:32:45,471 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:32:45,483 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54ddfc85{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T16:32:45,484 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7cc16daa{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:32:45,484 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:32:45,484 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e477d02{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:32:45,484 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@223874d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir/,STOPPED}
2024-12-09T16:32:45,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:45,499 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T16:32:45,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T16:32:45,559 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=155 (was 80)
Potentially hanging thread: HMaster-EventLoopGroup-7-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Close-WAL-Writer-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fbc78bf6848.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:38821
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38821 from jenkins.hfs.3
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:38821
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Close-WAL-Writer-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fbc78bf6848.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38821
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-18-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:34925
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-21-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-5-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-7-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-19-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-21-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-18-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-7-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38821
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-21-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Close-WAL-Writer-0
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$898/0x00007fbc78bf6848.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38821 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-20-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38821 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-5-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-20-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-20-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38821
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-18-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:34925
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38821
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-5-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:38821
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Command processor
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Potentially hanging thread: nioEventLoopGroup-19-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38821 from jenkins.hfs.2
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: Command processor
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Potentially hanging thread: nioEventLoopGroup-19-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=436 (was 405) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=518 (was 644), ProcessCount=11 (was 11), AvailableMemoryMB=6760 (was 7501)
2024-12-09T16:32:45,571 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=155, OpenFileDescriptor=436, MaxFileDescriptor=1048576, SystemLoadAverage=518, ProcessCount=11, AvailableMemoryMB=6759
2024-12-09T16:32:45,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T16:32:45,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.log.dir so I do NOT create it in target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a
2024-12-09T16:32:45,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b039934a-b7f8-fef7-2962-6bb61e4e665d/hadoop.tmp.dir so I do NOT create it in target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a
2024-12-09T16:32:45,571 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f, deleteOnExit=true
2024-12-09T16:32:45,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-09T16:32:45,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/test.cache.data in system properties and HBase conf
2024-12-09T16:32:45,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.tmp.dir in system properties and HBase conf
2024-12-09T16:32:45,571 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir in system properties and HBase conf
2024-12-09T16:32:45,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-09T16:32:45,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-09T16:32:45,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-09T16:32:45,572 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-09T16:32:45,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-09T16:32:45,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-09T16:32:45,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-09T16:32:45,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T16:32:45,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-09T16:32:45,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-09T16:32:45,572 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-09T16:32:45,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T16:32:45,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-09T16:32:45,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/nfs.dump.dir in system properties and HBase conf
2024-12-09T16:32:45,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/java.io.tmpdir in system properties and HBase conf
2024-12-09T16:32:45,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-09T16:32:45,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-09T16:32:45,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-09T16:32:45,591 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T16:32:45,684 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T16:32:45,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:45,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:45,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:45,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:45,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:45,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:45,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:45,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:45,705 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:45,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:45,880 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:32:45,898 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:32:45,902 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:32:45,902 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:32:45,902 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T16:32:45,906 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:32:45,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65a49e29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:32:45,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b0ad14{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:32:46,023 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1411600e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/java.io.tmpdir/jetty-localhost-39607-hadoop-hdfs-3_4_1-tests_jar-_-any-15027594487261336886/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T16:32:46,023 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64c51d47{HTTP/1.1, (http/1.1)}{localhost:39607}
2024-12-09T16:32:46,023 INFO [Time-limited test {}] server.Server(415): Started @165189ms
2024-12-09T16:32:46,036 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T16:32:46,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:46,263 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:32:46,266 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:32:46,267 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:32:46,267 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:32:46,267 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T16:32:46,270 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b804995{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:32:46,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fe94dda{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:32:46,388 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ad27871{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/java.io.tmpdir/jetty-localhost-38313-hadoop-hdfs-3_4_1-tests_jar-_-any-12381668154753454521/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:32:46,388 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3275e649{HTTP/1.1, (http/1.1)}{localhost:38313} 2024-12-09T16:32:46,389 INFO [Time-limited test {}] server.Server(415): Started @165555ms 2024-12-09T16:32:46,390 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T16:32:46,428 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:32:46,432 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:32:46,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:32:46,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:32:46,433 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T16:32:46,434 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@219aea9f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:32:46,434 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2380c17e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:32:46,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:32:46,495 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:46,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47718438{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/java.io.tmpdir/jetty-localhost-36675-hadoop-hdfs-3_4_1-tests_jar-_-any-11869433656157285570/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:32:46,529 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d44665f{HTTP/1.1, (http/1.1)}{localhost:36675} 2024-12-09T16:32:46,529 INFO [Time-limited test {}] server.Server(415): Started @165695ms 2024-12-09T16:32:46,531 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T16:32:47,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:47,160 WARN [Thread-1202 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data2/current/BP-985864207-172.17.0.2-1733761965604/current, will proceed with Du for space computation calculation,
2024-12-09T16:32:47,160 WARN [Thread-1201 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data1/current/BP-985864207-172.17.0.2-1733761965604/current, will proceed with Du for space computation calculation,
2024-12-09T16:32:47,205 WARN [Thread-1165 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:32:47,210 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf1daff0607cc5ff3 with lease ID 0x12ab51799507c38c: Processing first storage report for DS-e5a10720-c573-4ca0-affc-a9872dd6d17c from datanode DatanodeRegistration(127.0.0.1:38629, datanodeUuid=f013fb29-3851-41b0-b30a-5cd23a07e941, infoPort=34007, infoSecurePort=0, ipcPort=45101, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604)
2024-12-09T16:32:47,210 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf1daff0607cc5ff3 with lease ID 0x12ab51799507c38c: from storage DS-e5a10720-c573-4ca0-affc-a9872dd6d17c node DatanodeRegistration(127.0.0.1:38629, datanodeUuid=f013fb29-3851-41b0-b30a-5cd23a07e941, infoPort=34007, infoSecurePort=0, ipcPort=45101, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:32:47,211 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf1daff0607cc5ff3 with lease ID 0x12ab51799507c38c: Processing first storage report for DS-dae9b003-60d2-4a8c-849c-33098f5e9641 from datanode DatanodeRegistration(127.0.0.1:38629, datanodeUuid=f013fb29-3851-41b0-b30a-5cd23a07e941, infoPort=34007, infoSecurePort=0, ipcPort=45101, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604)
2024-12-09T16:32:47,211 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf1daff0607cc5ff3 with lease ID 0x12ab51799507c38c: from storage DS-dae9b003-60d2-4a8c-849c-33098f5e9641 node DatanodeRegistration(127.0.0.1:38629, datanodeUuid=f013fb29-3851-41b0-b30a-5cd23a07e941, infoPort=34007, infoSecurePort=0, ipcPort=45101, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:32:47,310 WARN [Thread-1212 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data3/current/BP-985864207-172.17.0.2-1733761965604/current, will proceed with Du for space computation calculation,
2024-12-09T16:32:47,311 WARN [Thread-1213 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data4/current/BP-985864207-172.17.0.2-1733761965604/current, will proceed with Du for space computation calculation,
2024-12-09T16:32:47,341 WARN [Thread-1188 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:32:47,344 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb57c48da4acfda2 with lease ID 0x12ab51799507c38d: Processing first storage report for DS-74ddcad1-451e-4142-b42a-148128315b3b from datanode DatanodeRegistration(127.0.0.1:39347, datanodeUuid=f7f267ed-ff2e-4163-a258-aa4a3188f4b7, infoPort=34009, infoSecurePort=0, ipcPort=36713, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604)
2024-12-09T16:32:47,344 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb57c48da4acfda2 with lease ID 0x12ab51799507c38d: from storage DS-74ddcad1-451e-4142-b42a-148128315b3b node DatanodeRegistration(127.0.0.1:39347, datanodeUuid=f7f267ed-ff2e-4163-a258-aa4a3188f4b7, infoPort=34009, infoSecurePort=0, ipcPort=36713, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:32:47,344 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb57c48da4acfda2 with lease ID 0x12ab51799507c38d: Processing first storage report for DS-e651beaf-7ec1-44fd-8b4c-051c3f818348 from datanode DatanodeRegistration(127.0.0.1:39347, datanodeUuid=f7f267ed-ff2e-4163-a258-aa4a3188f4b7, infoPort=34009, infoSecurePort=0, ipcPort=36713, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604)
2024-12-09T16:32:47,344 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb57c48da4acfda2 with lease ID 0x12ab51799507c38d: from storage DS-e651beaf-7ec1-44fd-8b4c-051c3f818348 node DatanodeRegistration(127.0.0.1:39347, datanodeUuid=f7f267ed-ff2e-4163-a258-aa4a3188f4b7, infoPort=34009, infoSecurePort=0, ipcPort=36713, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:32:47,369 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a
2024-12-09T16:32:47,375 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/zookeeper_0, clientPort=59507, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T16:32:47,376 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59507
2024-12-09T16:32:47,377 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:32:47,378 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:32:47,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39347 is added to blk_1073741825_1001 (size=7)
2024-12-09T16:32:47,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38629 is added to blk_1073741825_1001 (size=7)
2024-12-09T16:32:47,399 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e with version=8
2024-12-09T16:32:47,399 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/hbase-staging
2024-12-09T16:32:47,402 INFO [Time-limited test {}] client.ConnectionUtils(128): master/dfce6ce5b10c:0 server-side Connection retries=45
2024-12-09T16:32:47,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:32:47,402 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T16:32:47,403 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T16:32:47,403 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:32:47,403 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T16:32:47,403 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-09T16:32:47,403 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T16:32:47,405 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38925
2024-12-09T16:32:47,407 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38925 connecting to ZooKeeper ensemble=127.0.0.1:59507
2024-12-09T16:32:47,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:47,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:389250x0, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T16:32:47,450 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38925-0x100bfc623c50000 connected
2024-12-09T16:32:47,496 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:47,537 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:32:47,540 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:32:47,543 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:32:47,543 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e, hbase.cluster.distributed=false
2024-12-09T16:32:47,546 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T16:32:47,546 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38925
2024-12-09T16:32:47,550 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38925
2024-12-09T16:32:47,550 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38925
2024-12-09T16:32:47,551 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38925
2024-12-09T16:32:47,554 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38925
2024-12-09T16:32:47,574 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/dfce6ce5b10c:0 server-side Connection retries=45
2024-12-09T16:32:47,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:32:47,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T16:32:47,574 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T16:32:47,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:32:47,574 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T16:32:47,575 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-09T16:32:47,575 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T16:32:47,577 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38195
2024-12-09T16:32:47,578 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38195 connecting to ZooKeeper ensemble=127.0.0.1:59507
2024-12-09T16:32:47,579 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:32:47,582 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:32:47,596 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:381950x0, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T16:32:47,596 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:381950x0, quorum=127.0.0.1:59507, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:32:47,597 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-09T16:32:47,600 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38195-0x100bfc623c50001 connected
2024-12-09T16:32:47,600 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-09T16:32:47,601 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-09T16:32:47,603 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T16:32:47,610 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38195
2024-12-09T16:32:47,610 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38195
2024-12-09T16:32:47,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38195
2024-12-09T16:32:47,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38195
2024-12-09T16:32:47,622 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38195
2024-12-09T16:32:47,639 DEBUG [M:0;dfce6ce5b10c:38925 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;dfce6ce5b10c:38925
2024-12-09T16:32:47,642 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/dfce6ce5b10c,38925,1733761967402
2024-12-09T16:32:47,653 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:32:47,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:32:47,654 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/dfce6ce5b10c,38925,1733761967402
2024-12-09T16:32:47,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:32:47,662 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-09T16:32:47,662 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:32:47,662 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-09T16:32:47,663 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/dfce6ce5b10c,38925,1733761967402 from backup master directory
2024-12-09T16:32:47,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/dfce6ce5b10c,38925,1733761967402
2024-12-09T16:32:47,670 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:32:47,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:32:47,674 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-09T16:32:47,674 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=dfce6ce5b10c,38925,1733761967402
2024-12-09T16:32:47,685 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/hbase.id] with ID: 4f964147-b529-4216-b6ca-d7b505b54c73
2024-12-09T16:32:47,685 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/.tmp/hbase.id
2024-12-09T16:32:47,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38629 is added to blk_1073741826_1002 (size=42)
2024-12-09T16:32:47,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39347 is added to blk_1073741826_1002 (size=42)
2024-12-09T16:32:47,712 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/.tmp/hbase.id]:[hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/hbase.id]
2024-12-09T16:32:47,744 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:32:47,744 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-09T16:32:47,746 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms.
2024-12-09T16:32:47,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:32:47,754 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:32:47,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39347 is added to blk_1073741827_1003 (size=196)
2024-12-09T16:32:47,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38629 is added to blk_1073741827_1003 (size=196)
2024-12-09T16:32:47,785 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T16:32:47,786 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-09T16:32:47,786 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:32:47,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38629 is added to blk_1073741828_1004 (size=1189)
2024-12-09T16:32:47,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39347 is added to blk_1073741828_1004 (size=1189)
2024-12-09T16:32:47,826 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store
2024-12-09T16:32:47,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39347 is added to blk_1073741829_1005 (size=34)
2024-12-09T16:32:47,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38629 is added to blk_1073741829_1005 (size=34)
2024-12-09T16:32:47,863 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:32:47,864 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T16:32:47,864 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:32:47,864 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:32:47,864 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T16:32:47,864 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:32:47,864 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:32:47,864 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733761967864Disabling compacts and flushes for region at 1733761967864Disabling writes for close at 1733761967864Writing region close event to WAL at 1733761967864Closed at 1733761967864
2024-12-09T16:32:47,866 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/.initializing
2024-12-09T16:32:47,866 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402
2024-12-09T16:32:47,876 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C38925%2C1733761967402, suffix=, logDir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402, archiveDir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/oldWALs, maxLogs=10
2024-12-09T16:32:47,876 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38925%2C1733761967402.1733761967876
2024-12-09T16:32:47,894 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876
2024-12-09T16:32:47,902 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34009:34009),(127.0.0.1/127.0.0.1:34007:34007)]
2024-12-09T16:32:47,905 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-09T16:32:47,905 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:32:47,905 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,905 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,907 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,909 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-09T16:32:47,909 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:32:47,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:32:47,910 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-09T16:32:47,912 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:32:47,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T16:32:47,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,914 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-09T16:32:47,914 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:32:47,915 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T16:32:47,915 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-09T16:32:47,917 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:32:47,918 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T16:32:47,918 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,919 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,919 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,921 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,921 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,922 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-09T16:32:47,929 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:32:47,935 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T16:32:47,936 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=815008, jitterRate=0.036337316036224365}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-09T16:32:47,936 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733761967905Initializing all the Stores at 1733761967907 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761967907Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761967907Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761967907Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761967907Cleaning up temporary data from old regions at 1733761967921 (+14 ms)Region opened successfully at 1733761967936 (+15 ms)
2024-12-09T16:32:47,937 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-09T16:32:47,941 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4667fbb5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000,
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0 2024-12-09T16:32:47,943 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T16:32:47,943 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T16:32:47,943 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T16:32:47,943 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T16:32:47,944 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T16:32:47,945 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T16:32:47,945 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T16:32:47,953 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T16:32:47,955 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T16:32:47,962 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T16:32:47,962 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T16:32:47,964 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T16:32:47,970 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T16:32:47,971 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T16:32:47,973 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T16:32:47,986 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T16:32:47,988 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T16:32:47,998 DEBUG 
[master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T16:32:48,005 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T16:32:48,012 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T16:32:48,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:32:48,020 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:32:48,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:32:48,020 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:32:48,021 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=dfce6ce5b10c,38925,1733761967402, sessionid=0x100bfc623c50000, setting cluster-up flag (Was=false) 2024-12-09T16:32:48,037 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:32:48,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:32:48,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:48,062 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T16:32:48,064 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,38925,1733761967402 2024-12-09T16:32:48,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:32:48,081 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:32:48,104 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T16:32:48,106 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,38925,1733761967402 2024-12-09T16:32:48,127 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T16:32:48,130 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T16:32:48,130 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T16:32:48,131 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, 
ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T16:32:48,131 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: dfce6ce5b10c,38925,1733761967402 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T16:32:48,133 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:32:48,133 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:32:48,133 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:32:48,133 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:32:48,133 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/dfce6ce5b10c:0, corePoolSize=10, maxPoolSize=10 2024-12-09T16:32:48,133 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:32:48,133 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:32:48,133 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:32:48,151 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:32:48,151 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T16:32:48,153 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:32:48,153 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T16:32:48,174 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733761998174 2024-12-09T16:32:48,174 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T16:32:48,174 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T16:32:48,175 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T16:32:48,175 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T16:32:48,175 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T16:32:48,175 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T16:32:48,186 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
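The FlushLargeStoresPolicy(65) fallback above is plain arithmetic: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the master:store table descriptor, the lower bound becomes the region memstore flush size divided by the number of column families. A minimal sketch of that computation, using only numbers taken from the log itself (the class and variable names here are illustrative, not HBase's):

    // Sketch of the per-family flush lower bound fallback seen in the log.
    // Inputs come from the log: flushSize=134217728 (128 MB) and the
    // master:store region has 4 families (info, proc, rs, state).
    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            long memStoreFlushSize = 134_217_728L; // 128 MB
            int columnFamilies = 4;                // info, proc, rs, state
            long lowerBound = memStoreFlushSize / columnFamilies;
            // Matches FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
            // (32.0 M) reported a few entries later.
            System.out.println("flushSizeLowerBound=" + lowerBound);
        }
    }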
2024-12-09T16:32:48,193 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-12-09T16:32:48,193 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-12-09T16:32:48,193 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-12-09T16:32:48,196 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-12-09T16:32:48,196 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-12-09T16:32:48,196 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761968196,5,FailOnTimeoutGroup]
2024-12-09T16:32:48,197 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761968196,5,FailOnTimeoutGroup]
2024-12-09T16:32:48,197 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-09T16:32:48,197 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-12-09T16:32:48,197 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-12-09T16:32:48,197 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
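The HMaster(1741) line just above shows a feature gated on a positive threshold: region reopening for high storeFileRefCount stays off while hbase.regions.recovery.store.file.ref.count is not set above 0. A hedged sketch of that enable-if-positive pattern with the Hadoop Configuration API (the key and default of 0 come from the log; the class around it is illustrative):

    import org.apache.hadoop.conf.Configuration;

    // Illustrative sketch of a threshold-gated toggle, mirroring the
    // "Provide threshold value > 0 ... to enable it" message in the log.
    public class RefCountGateSketch {
        static final String KEY = "hbase.regions.recovery.store.file.ref.count";

        public static void main(String[] args) {
            Configuration conf = new Configuration();
            int threshold = conf.getInt(KEY, 0); // 0 disables the feature, per the log
            if (threshold > 0) {
                System.out.println("would reopen regions when refCount > " + threshold);
            } else {
                System.out.println("Reopening regions with very high storeFileRefCount is disabled.");
            }
        }
    }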
2024-12-09T16:32:48,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39347 is added to blk_1073741831_1007 (size=1321)
2024-12-09T16:32:48,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38629 is added to blk_1073741831_1007 (size=1321)
2024-12-09T16:32:48,203 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-12-09T16:32:48,204 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e
2024-12-09T16:32:48,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39347 is added to blk_1073741832_1008 (size=32)
2024-12-09T16:32:48,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38629 is added to blk_1073741832_1008 (size=32)
2024-12-09T16:32:48,234 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(746): ClusterId : 4f964147-b529-4216-b6ca-d7b505b54c73
2024-12-09T16:32:48,234 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-12-09T16:32:48,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:32:48,246 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-12-09T16:32:48,247 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
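The FSTableDescriptors/HRegion(7572) entries dump a full table descriptor as key/value attributes. For orientation, here is a hedged sketch of building one family with the attributes the log reports for 'info' (VERSIONS 3, IN_MEMORY true, BLOCKSIZE 8192, ROWCOL bloom, ROW_INDEX_V1 encoding), using the HBase client builder API; the table name "demo" is a stand-in, and exact builder availability depends on the HBase version on the classpath:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: one column family shaped like the 'info' family in the log's
    // descriptor dump. Not the hbase:meta bootstrap code itself.
    public class MetaLikeDescriptorSketch {
        public static void main(String[] args) {
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo")) // stand-in table name
                .setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setInMemory(true)
                    .setBlocksize(8192)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .build())
                .build();
            System.out.println(td);
        }
    }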
2024-12-09T16:32:48,250 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-09T16:32:48,254 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-09T16:32:48,254 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:32:48,254 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-12-09T16:32:48,255 DEBUG [RS:0;dfce6ce5b10c:38195 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b0ad78a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0
2024-12-09T16:32:48,266 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:32:48,266 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-09T16:32:48,273 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-12-09T16:32:48,273 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:32:48,275 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:32:48,275 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-12-09T16:32:48,277 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-12-09T16:32:48,278 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:32:48,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:32:48,279 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-12-09T16:32:48,282 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-12-09T16:32:48,282 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:32:48,283 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:32:48,283 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-12-09T16:32:48,284 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740
2024-12-09T16:32:48,285 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740
2024-12-09T16:32:48,288 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-12-09T16:32:48,288 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-12-09T16:32:48,289 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-12-09T16:32:48,292 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-12-09T16:32:48,299 DEBUG [RS:0;dfce6ce5b10c:38195 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;dfce6ce5b10c:38195
2024-12-09T16:32:48,299 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-12-09T16:32:48,299 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-12-09T16:32:48,299 DEBUG [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(832): About to register with Master.
2024-12-09T16:32:48,302 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(2659): reportForDuty to master=dfce6ce5b10c,38925,1733761967402 with port=38195, startcode=1733761967572
2024-12-09T16:32:48,305 DEBUG [RS:0;dfce6ce5b10c:38195 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-12-09T16:32:48,318 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T16:32:48,324 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41835, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService
2024-12-09T16:32:48,324 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38925 {}] master.ServerManager(363): Checking decommissioned status of RegionServer dfce6ce5b10c,38195,1733761967572
2024-12-09T16:32:48,325 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38925 {}] master.ServerManager(517): Registering regionserver=dfce6ce5b10c,38195,1733761967572
2024-12-09T16:32:48,326 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702487, jitterRate=-0.10674162209033966}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-12-09T16:32:48,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733761968234Initializing all the Stores at 1733761968236 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761968236Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761968248 (+12 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761968248Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761968248Cleaning up temporary data from old regions at 1733761968288 (+40 ms)Region opened successfully at 1733761968326 (+38 ms)
2024-12-09T16:32:48,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T16:32:48,326 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T16:32:48,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T16:32:48,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T16:32:48,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T16:32:48,330 DEBUG [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e
2024-12-09T16:32:48,330 DEBUG [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37495
2024-12-09T16:32:48,330 DEBUG [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-12-09T16:32:48,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T16:32:48,338 DEBUG [RS:0;dfce6ce5b10c:38195 {}] zookeeper.ZKUtil(111): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/dfce6ce5b10c,38195,1733761967572
2024-12-09T16:32:48,338 WARN [RS:0;dfce6ce5b10c:38195 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
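The ZKUtil entries in this log treat a missing znode as an ordinary state rather than a failure ("node does not exist (not necessarily an error)"), and ZKUtil(111) above sets a watcher on a znode that does exist. A minimal sketch of that optional-read idiom with the stock ZooKeeper client; the quorum string comes from the log, the error handling is deliberately simplified:

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    // Minimal sketch: read an optional znode, treating NoNode as "not set yet"
    // rather than an error, as the ZKUtil debug lines in this log do.
    public class OptionalZNodeSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:59507", 30_000, event -> { });
            try {
                byte[] data = zk.getData("/hbase/balancer", false, null);
                System.out.println("balancer znode: " + new String(data));
            } catch (KeeperException.NoNodeException e) {
                // Matches "node does not exist (not necessarily an error)".
                System.out.println("/hbase/balancer not set; using defaults");
            } finally {
                zk.close();
            }
        }
    }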
2024-12-09T16:32:48,338 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T16:32:48,338 INFO [RS:0;dfce6ce5b10c:38195 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:32:48,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733761968326Disabling compacts and flushes for region at 1733761968326Disabling writes for close at 1733761968327 (+1 ms)Writing region close event to WAL at 1733761968338 (+11 ms)Closed at 1733761968338
2024-12-09T16:32:48,338 DEBUG [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572
2024-12-09T16:32:48,340 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T16:32:48,341 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-12-09T16:32:48,341 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-12-09T16:32:48,345 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T16:32:48,347 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-12-09T16:32:48,355 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [dfce6ce5b10c,38195,1733761967572]
2024-12-09T16:32:48,364 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-12-09T16:32:48,378 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-12-09T16:32:48,386 INFO [RS:0;dfce6ce5b10c:38195 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-09T16:32:48,386 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:32:48,389 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-12-09T16:32:48,395 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-12-09T16:32:48,395 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-12-09T16:32:48,396 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:32:48,396 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:32:48,396 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:32:48,396 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:32:48,396 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:32:48,396 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2
2024-12-09T16:32:48,396 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:32:48,396 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:32:48,396 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:32:48,396 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:32:48,396 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:32:48,397 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:32:48,397 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3
2024-12-09T16:32:48,397 DEBUG [RS:0;dfce6ce5b10c:38195 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3
2024-12-09T16:32:48,401 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-12-09T16:32:48,401 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-12-09T16:32:48,401 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:32:48,401 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
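Each executor.ExecutorService(95) line above names a per-event-type pool whose core and max sizes are equal, i.e. a fixed-size pool. A generic java.util.concurrent sketch of that shape; the pool names are reused from the log for flavor, but this is not HBase's own ExecutorService class:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Generic sketch of a named fixed-size pool per event type, like the
    // RS_OPEN_REGION / RS_CLOSE_REGION executors above (core == max).
    public class EventTypePoolSketch {
        static ThreadPoolExecutor newPool(String name, int size) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                size, size, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
                r -> new Thread(r, name + "-worker"));
            pool.allowCoreThreadTimeOut(true); // let idle workers exit
            return pool;
        }

        public static void main(String[] args) {
            ThreadPoolExecutor open = newPool("RS_OPEN_REGION", 1);
            open.execute(() -> System.out.println("opening region..."));
            open.shutdown();
        }
    }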
2024-12-09T16:32:48,402 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-12-09T16:32:48,402 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,38195,1733761967572-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-09T16:32:48,428 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-12-09T16:32:48,428 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,38195,1733761967572-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:32:48,428 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:32:48,428 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.Replication(171): dfce6ce5b10c,38195,1733761967572 started
2024-12-09T16:32:48,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:48,450 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
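The recurring Close-WAL-Writer-0 warning above wraps the real failure ("java.io.IOException: Filesystem closed") in an InvocationTargetException because RecoverLeaseFSUtils reaches isFileClosed through reflection. A hedged sketch of that probe-and-unwrap pattern; the target object and method signature here are stand-ins, not the actual HBase code:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    // Sketch of a reflective capability probe in the spirit of
    // RecoverLeaseFSUtils.isFileClosed: the real cause arrives wrapped in an
    // InvocationTargetException and must be unwrapped before logging.
    public class ReflectiveProbeSketch {
        public static boolean isFileClosed(Object fs, String path) {
            try {
                Method m = fs.getClass().getMethod("isFileClosed", String.class); // stand-in signature
                return (Boolean) m.invoke(fs, path);
            } catch (NoSuchMethodException | IllegalAccessException e) {
                return false; // method not offered or not callable; treat as "unknown"
            } catch (InvocationTargetException e) {
                // e.getCause() is the real error, e.g. "Filesystem closed".
                System.err.println("Failed invocation: " + e.getCause());
                return false;
            }
        }
    }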
2024-12-09T16:32:48,450 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(1482): Serving as dfce6ce5b10c,38195,1733761967572, RpcServer on dfce6ce5b10c/172.17.0.2:38195, sessionid=0x100bfc623c50001
2024-12-09T16:32:48,450 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-12-09T16:32:48,450 DEBUG [RS:0;dfce6ce5b10c:38195 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager dfce6ce5b10c,38195,1733761967572
2024-12-09T16:32:48,450 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,38195,1733761967572'
2024-12-09T16:32:48,450 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-12-09T16:32:48,451 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-12-09T16:32:48,452 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-12-09T16:32:48,452 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-12-09T16:32:48,452 DEBUG [RS:0;dfce6ce5b10c:38195 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager dfce6ce5b10c,38195,1733761967572
2024-12-09T16:32:48,452 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,38195,1733761967572'
2024-12-09T16:32:48,452 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-12-09T16:32:48,452 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-12-09T16:32:48,453 DEBUG [RS:0;dfce6ce5b10c:38195 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-12-09T16:32:48,453 INFO [RS:0;dfce6ce5b10c:38195 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-12-09T16:32:48,453 INFO [RS:0;dfce6ce5b10c:38195 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-12-09T16:32:48,497 WARN [dfce6ce5b10c:38925 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
2024-12-09T16:32:48,497 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:32:48,556 INFO [RS:0;dfce6ce5b10c:38195 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C38195%2C1733761967572, suffix=, logDir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572, archiveDir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/oldWALs, maxLogs=32
2024-12-09T16:32:48,561 INFO [RS:0;dfce6ce5b10c:38195 {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38195%2C1733761967572.1733761968561
2024-12-09T16:32:48,589 INFO [RS:0;dfce6ce5b10c:38195 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561
2024-12-09T16:32:48,614 DEBUG [RS:0;dfce6ce5b10c:38195 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34009:34009),(127.0.0.1/127.0.0.1:34007:34007)]
2024-12-09T16:32:48,748 DEBUG [dfce6ce5b10c:38925 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1
2024-12-09T16:32:48,748 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=dfce6ce5b10c,38195,1733761967572
2024-12-09T16:32:48,750 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,38195,1733761967572, state=OPENING
2024-12-09T16:32:48,803 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it
2024-12-09T16:32:48,812 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:32:48,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:32:48,812 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:32:48,812 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-12-09T16:32:48,812 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:32:48,813 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,38195,1733761967572}]
2024-12-09T16:32:48,967 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-09T16:32:48,969 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44981, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-09T16:32:48,974 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-09T16:32:48,975 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:32:48,977 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C38195%2C1733761967572.meta, suffix=.meta, logDir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572, archiveDir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/oldWALs, maxLogs=32
2024-12-09T16:32:48,978 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761968978.meta
2024-12-09T16:32:49,001 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761968978.meta
2024-12-09T16:32:49,008 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34009:34009),(127.0.0.1/127.0.0.1:34007:34007)]
2024-12-09T16:32:49,013 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-09T16:32:49,014 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-09T16:32:49,014 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-09T16:32:49,014 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
2024-12-09T16:32:49,014 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740
2024-12-09T16:32:49,015 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:32:49,015 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740
2024-12-09T16:32:49,015 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740
2024-12-09T16:32:49,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-12-09T16:32:49,018 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-12-09T16:32:49,018 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:32:49,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:32:49,019 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-12-09T16:32:49,020 INFO [StoreOpener-1588230740-1 {}]
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T16:32:49,021 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:32:49,021 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:32:49,021 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T16:32:49,022 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T16:32:49,022 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:32:49,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:32:49,023 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T16:32:49,024 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T16:32:49,024 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:32:49,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:32:49,025 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T16:32:49,026 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740 2024-12-09T16:32:49,028 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740 2024-12-09T16:32:49,031 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T16:32:49,031 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T16:32:49,032 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor; using region.getMemStoreFlushHeapSize/# of families (16.0 M) instead. 
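The FlushLargeStoresPolicy line above fires because the hbase:meta descriptor does not set hbase.hregion.percolumnfamilyflush.size.lower.bound, so the lower bound falls back to the region's memstore flush heap size divided by the number of column families. A minimal sketch of pinning the bound explicitly on a descriptor instead; the table name and the 8 MB value are illustrative, not taken from this log (hbase:meta itself is managed by the master):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushBoundSketch {
      public static void main(String[] args) {
        // Hypothetical user table, for illustration only.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Pin the per-family flush lower bound (8 MB here) so the
            // region.getMemStoreFlushHeapSize / #families fallback is not used.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(8L * 1024 * 1024))
            .build();
        System.out.println(td);
      }
    }

Building the descriptor needs no running cluster; the setting only takes effect once the table is created or altered with it.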
2024-12-09T16:32:49,034 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T16:32:49,036 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=826293, jitterRate=0.050686076283454895}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T16:32:49,036 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T16:32:49,037 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733761969015Writing region info on filesystem at 1733761969015Initializing all the Stores at 1733761969016 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761969016Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761969017 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761969017Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733761969017Cleaning up temporary data from old regions at 1733761969031 (+14 ms)Running coprocessor post-open hooks at 1733761969036 (+5 ms)Region opened successfully at 1733761969037 (+1 ms) 2024-12-09T16:32:49,038 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733761968966 2024-12-09T16:32:49,041 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T16:32:49,041 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T16:32:49,048 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=dfce6ce5b10c,38195,1733761967572 2024-12-09T16:32:49,049 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,38195,1733761967572, state=OPEN 2024-12-09T16:32:49,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:32:49,099 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:32:49,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:32:49,099 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,38195,1733761967572 2024-12-09T16:32:49,099 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:32:49,099 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:32:49,104 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T16:32:49,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,38195,1733761967572 in 287 msec 2024-12-09T16:32:49,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T16:32:49,109 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 763 msec 2024-12-09T16:32:49,110 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:32:49,110 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T16:32:49,111 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:32:49,112 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,38195,1733761967572, seqNum=-1] 2024-12-09T16:32:49,112 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:32:49,114 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42379, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:32:49,123 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 992 msec 2024-12-09T16:32:49,124 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733761969124, completionTime=-1 2024-12-09T16:32:49,124 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T16:32:49,124 DEBUG 
[master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T16:32:49,127 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T16:32:49,127 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733762029127 2024-12-09T16:32:49,127 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733762089127 2024-12-09T16:32:49,127 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T16:32:49,127 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,38925,1733761967402-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:32:49,128 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,38925,1733761967402-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:32:49,128 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,38925,1733761967402-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:32:49,128 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-dfce6ce5b10c:38925, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:32:49,128 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T16:32:49,128 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T16:32:49,131 DEBUG [master/dfce6ce5b10c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T16:32:49,134 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.460sec 2024-12-09T16:32:49,135 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T16:32:49,135 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T16:32:49,135 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T16:32:49,135 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
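The ChoreService registrations around this point (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on) all use HBase's ScheduledChore/ChoreService scheduler. A minimal sketch of that pattern, assuming only the public ChoreService, ScheduledChore, and Stoppable APIs; the chore name and 1000 ms period are made up, not from this log:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Simple Stoppable so the chore can be told to stop.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped = false;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example");
        // Period is in milliseconds, matching the unit=MILLISECONDS entries above.
        ScheduledChore tick = new ScheduledChore("exampleChore", stopper, 1000) {
          @Override protected void chore() { System.out.println("chore tick"); }
        };
        service.scheduleChore(tick);
        Thread.sleep(3500); // let it tick a few times
        stopper.stop("done");
        service.shutdown();
      }
    }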
2024-12-09T16:32:49,135 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T16:32:49,135 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,38925,1733761967402-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:32:49,135 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,38925,1733761967402-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T16:32:49,139 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T16:32:49,139 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T16:32:49,139 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,38925,1733761967402-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:32:49,234 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51d0668c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:32:49,234 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request dfce6ce5b10c,38925,-1 for getting cluster id 2024-12-09T16:32:49,234 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T16:32:49,237 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4f964147-b529-4216-b6ca-d7b505b54c73' 2024-12-09T16:32:49,239 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T16:32:49,239 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4f964147-b529-4216-b6ca-d7b505b54c73" 2024-12-09T16:32:49,240 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d088991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:32:49,240 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [dfce6ce5b10c,38925,-1] 2024-12-09T16:32:49,240 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T16:32:49,240 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:32:49,242 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54200, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T16:32:49,244 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cd94a44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:32:49,244 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:32:49,246 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,38195,1733761967572, seqNum=-1] 2024-12-09T16:32:49,246 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:32:49,249 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59746, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:32:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=dfce6ce5b10c,38925,1733761967402 2024-12-09T16:32:49,252 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:32:49,257 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T16:32:49,257 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-09T16:32:49,257 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-09T16:32:49,258 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T16:32:49,259 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is dfce6ce5b10c,38925,1733761967402 2024-12-09T16:32:49,260 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@767dbdb0 2024-12-09T16:32:49,260 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T16:32:49,262 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54212, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T16:32:49,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38925 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T16:32:49,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38925 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
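The two TableDescriptorChecker warnings above are expected: the test shrinks MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) far below sane production values to force frequent flushes and log rolls. A sketch of a descriptor carrying those same values, using the standard TableDescriptorBuilder API (whether the checker merely warns or rejects such a descriptor depends on the sanity-check configuration of the running version):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SmallLimitsSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(1)   // VERSIONS => '1' as in the create above
                .build())
            .setMaxFileSize(786432L)     // triggers the MAX_FILESIZE warning
            .setMemStoreFlushSize(8192L) // triggers the MEMSTORE_FLUSHSIZE warning
            .build();
        System.out.println(td);
      }
    }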
2024-12-09T16:32:49,264 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38925 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T16:32:49,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38925 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T16:32:49,268 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T16:32:49,269 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:32:49,269 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38925 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-09T16:32:49,272 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T16:32:49,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38925 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T16:32:49,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39347 is added to blk_1073741835_1011 (size=395) 2024-12-09T16:32:49,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38629 is added to blk_1073741835_1011 (size=395) 2024-12-09T16:32:49,292 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => edaaad3a2649a598b64c77a2ecec276e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e 2024-12-09T16:32:49,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39347 is added to blk_1073741836_1012 (size=78) 2024-12-09T16:32:49,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38629 is added to blk_1073741836_1012 (size=78) 2024-12-09T16:32:49,350 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:32:49,350 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing edaaad3a2649a598b64c77a2ecec276e, disabling compactions & flushes 2024-12-09T16:32:49,350 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 2024-12-09T16:32:49,350 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 2024-12-09T16:32:49,350 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. after waiting 0 ms 2024-12-09T16:32:49,350 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 2024-12-09T16:32:49,350 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 2024-12-09T16:32:49,350 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for edaaad3a2649a598b64c77a2ecec276e: Waiting for close lock at 1733761969350Disabling compacts and flushes for region at 1733761969350Disabling writes for close at 1733761969350Writing region close event to WAL at 1733761969350Closed at 1733761969350 2024-12-09T16:32:49,353 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T16:32:49,353 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733761969353"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733761969353"}]},"ts":"1733761969353"} 2024-12-09T16:32:49,360 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
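The MetaTableAccessor Put above writes the new region's info:regioninfo and info:state cells into its hbase:meta row. A minimal client-side sketch of reading those cells back, assuming a running cluster and standard client configuration on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaRowSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // Row key format matches the Put above: <table>,<startkey>,<ts>.<encoded name>.
          byte[] row = Bytes.toBytes(
              "TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e.");
          Get get = new Get(row)
              .addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)
              .addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER);
          Result r = meta.get(get);
          System.out.println("state=" + Bytes.toString(
              r.getValue(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)));
        }
      }
    }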
2024-12-09T16:32:49,362 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T16:32:49,363 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733761969363"}]},"ts":"1733761969363"} 2024-12-09T16:32:49,367 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-09T16:32:49,367 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=edaaad3a2649a598b64c77a2ecec276e, ASSIGN}] 2024-12-09T16:32:49,369 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=edaaad3a2649a598b64c77a2ecec276e, ASSIGN 2024-12-09T16:32:49,371 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=edaaad3a2649a598b64c77a2ecec276e, ASSIGN; state=OFFLINE, location=dfce6ce5b10c,38195,1733761967572; forceNewPlan=false, retain=false 2024-12-09T16:32:49,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:49,498 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:32:49,522 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=edaaad3a2649a598b64c77a2ecec276e, regionState=OPENING, regionLocation=dfce6ce5b10c,38195,1733761967572 2024-12-09T16:32:49,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=edaaad3a2649a598b64c77a2ecec276e, ASSIGN because future has completed 2024-12-09T16:32:49,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure edaaad3a2649a598b64c77a2ecec276e, server=dfce6ce5b10c,38195,1733761967572}] 2024-12-09T16:32:49,638 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T16:32:49,638 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T16:32:49,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T16:32:49,639 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T16:32:49,691 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 
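The recurring Close-WAL-Writer-0 warnings above come from RecoverLeaseFSUtils retrying lease recovery on WALs of an earlier mini-cluster whose DFSClient has already been shut down, so the reflective isFileClosed probe keeps throwing "Filesystem closed"; noisy, but harmless to the new cluster. A minimal sketch of the same recover-then-poll pattern against a live DistributedFileSystem (the path argument is illustrative; the production utility reaches isFileClosed via reflection, as the stack trace shows, while this sketch calls it directly):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path wal = new Path(args[0]); // e.g. hdfs://.../WALs/<server>/<wal file>
        FileSystem fs = wal.getFileSystem(conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Ask the NameNode to start lease recovery; true means it is already done.
          boolean recovered = dfs.recoverLease(wal);
          // Otherwise poll until the NameNode reports the file closed.
          while (!recovered) {
            Thread.sleep(1000);
            recovered = dfs.isFileClosed(wal);
          }
          System.out.println("lease recovered for " + wal);
        }
      }
    }

If the client's FileSystem has been closed, as in the traces above, both calls fail with java.io.IOException: Filesystem closed, which is exactly the cause chain the warning keeps printing.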
2024-12-09T16:32:49,691 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => edaaad3a2649a598b64c77a2ecec276e, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:32:49,691 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:32:49,691 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:32:49,692 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:32:49,692 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:32:49,695 INFO [StoreOpener-edaaad3a2649a598b64c77a2ecec276e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:32:49,697 INFO [StoreOpener-edaaad3a2649a598b64c77a2ecec276e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region edaaad3a2649a598b64c77a2ecec276e columnFamilyName info 2024-12-09T16:32:49,697 DEBUG [StoreOpener-edaaad3a2649a598b64c77a2ecec276e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:32:49,697 INFO [StoreOpener-edaaad3a2649a598b64c77a2ecec276e-1 {}] regionserver.HStore(327): Store=edaaad3a2649a598b64c77a2ecec276e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:32:49,698 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:32:49,698 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/default/TestLogRolling-testLogRollOnPipelineRestart/edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:32:49,699 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/default/TestLogRolling-testLogRollOnPipelineRestart/edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:32:49,700 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:32:49,700 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:32:49,701 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:32:49,704 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/default/TestLogRolling-testLogRollOnPipelineRestart/edaaad3a2649a598b64c77a2ecec276e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:32:49,705 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened edaaad3a2649a598b64c77a2ecec276e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847023, jitterRate=0.0770464837551117}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T16:32:49,705 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:32:49,706 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for edaaad3a2649a598b64c77a2ecec276e: Running coprocessor pre-open hook at 1733761969692Writing region info on filesystem at 1733761969692Initializing all the Stores at 1733761969694 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733761969694Cleaning up temporary data from old regions at 1733761969700 (+6 ms)Running coprocessor post-open hooks at 1733761969705 (+5 ms)Region opened successfully at 1733761969706 (+1 ms) 2024-12-09T16:32:49,709 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e., pid=6, masterSystemTime=1733761969684 2024-12-09T16:32:49,713 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 2024-12-09T16:32:49,713 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 2024-12-09T16:32:49,714 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=edaaad3a2649a598b64c77a2ecec276e, regionState=OPEN, openSeqNum=2, regionLocation=dfce6ce5b10c,38195,1733761967572 2024-12-09T16:32:49,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure edaaad3a2649a598b64c77a2ecec276e, server=dfce6ce5b10c,38195,1733761967572 because future has completed 2024-12-09T16:32:49,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T16:32:49,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure edaaad3a2649a598b64c77a2ecec276e, server=dfce6ce5b10c,38195,1733761967572 in 192 msec 2024-12-09T16:32:49,728 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T16:32:49,728 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=edaaad3a2649a598b64c77a2ecec276e, ASSIGN in 357 msec 2024-12-09T16:32:49,739 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T16:32:49,739 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733761969739"}]},"ts":"1733761969739"} 2024-12-09T16:32:49,742 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-09T16:32:49,745 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T16:32:49,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 481 msec 2024-12-09T16:32:50,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-12-09T16:32:50,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-09T16:32:50,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:50,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:51,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:51,448 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:51,499 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:52,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:52,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:52,500 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:53,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:53,449 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:53,501 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:54,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:54,452 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-09T16:32:54,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-09T16:32:54,517 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T16:32:54,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:54,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:54,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:54,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:54,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:54,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:54,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:54,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:54,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:54,544 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:32:54,551 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-09T16:32:54,551 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart'
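The FsDatasetImpl warnings above are a shutdown race: the Metrics2 snapshot thread (HBase-Metrics2-1) polls the datanode's volume metrics after the dataset's executor map has already been nulled during teardown, so each poll logs the caught NullPointerException message rather than failing the collector. A minimal sketch of the defensive pattern involved; the field and method names here are hypothetical stand-ins, not the actual FsDatasetImpl members:

    import java.util.Map;
    import java.util.concurrent.ThreadPoolExecutor;

    final class MetricsRaceSketch {
      private volatile Map<String, ThreadPoolExecutor> executors; // nulled on shutdown

      long queuedOps() {
        Map<String, ThreadPoolExecutor> snapshot = executors; // read the field once
        if (snapshot == null) {
          return 0L; // component already torn down; report a neutral value
        }
        long total = 0L;
        for (ThreadPoolExecutor e : snapshot.values()) {
          total += e.getQueue().size();
        }
        return total;
      }
    }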
2024-12-09T16:32:55,066 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-09T16:32:55,453 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-09T16:32:55,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:56,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:56,454 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:56,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:32:57,067 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:32:57,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:32:57,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:32:58,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:32:58,455 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:32:58,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:32:59,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:32:59,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38925 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T16:32:59,312 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-12-09T16:32:59,312 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-12-09T16:32:59,315 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-12-09T16:32:59,316 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e.
2024-12-09T16:32:59,320 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e., hostname=dfce6ce5b10c,38195,1733761967572, seqNum=2]
2024-12-09T16:32:59,456 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:32:59,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:32:59,638 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-12-09T16:32:59,638 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer
2024-12-09T16:33:00,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:33:00,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:33:00,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:33:01,070 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:33:01,323 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561
2024-12-09T16:33:01,324 WARN [ResponseProcessor for block BP-985864207-172.17.0.2-1733761965604:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985864207-172.17.0.2-1733761965604:blk_1073741833_1009
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:01,324 WARN [ResponseProcessor for block BP-985864207-172.17.0.2-1733761965604:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985864207-172.17.0.2-1733761965604:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:01,324 WARN [ResponseProcessor for block BP-985864207-172.17.0.2-1733761965604:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985864207-172.17.0.2-1733761965604:blk_1073741830_1006
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:01,324 WARN [DataStreamer for file /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 block BP-985864207-172.17.0.2-1733761965604:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985864207-172.17.0.2-1733761965604:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39347,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39347,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK]) is bad.
2024-12-09T16:33:01,324 WARN [DataStreamer for file /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761968978.meta block BP-985864207-172.17.0.2-1733761965604:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985864207-172.17.0.2-1733761965604:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39347,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39347,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK]) is bad.
2024-12-09T16:33:01,325 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-552523611_22 at /127.0.0.1:60490 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39347:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60490 dst: /127.0.0.1:39347
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:01,325 WARN [DataStreamer for file /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876 block BP-985864207-172.17.0.2-1733761965604:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985864207-172.17.0.2-1733761965604:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39347,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK], DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39347,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK]) is bad.
2024-12-09T16:33:01,325 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-552523611_22 at /127.0.0.1:45434 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45434 dst: /127.0.0.1:38629
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:01,325 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-552523611_22 at /127.0.0.1:60506 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39347:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60506 dst: /127.0.0.1:39347
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:01,325 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-552523611_22 at /127.0.0.1:45448 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45448 dst: /127.0.0.1:38629
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:01,326 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_822974619_22 at /127.0.0.1:60450 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39347:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60450 dst: /127.0.0.1:39347
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:01,326 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_822974619_22 at /127.0.0.1:45406 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45406 dst: /127.0.0.1:38629
java.io.IOException: Premature EOF from inputStream
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:01,346 WARN [BP-985864207-172.17.0.2-1733761965604 heartbeating to localhost/127.0.0.1:37495 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985864207-172.17.0.2-1733761965604 (Datanode Uuid f7f267ed-ff2e-4163-a258-aa4a3188f4b7) service to localhost/127.0.0.1:37495
2024-12-09T16:33:01,368 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data3/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:33:01,369 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data4/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:33:01,369 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47718438{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:33:01,370 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d44665f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:33:01,370 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:33:01,370 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2380c17e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:33:01,370 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@219aea9f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,STOPPED}
2024-12-09T16:33:01,372 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:33:01,384 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:33:01,387 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:33:01,388 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:33:01,388 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:33:01,388 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T16:33:01,389 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@787dca1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:33:01,389 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64ee86a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:33:01,457 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:33:01,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ce5842a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/java.io.tmpdir/jetty-localhost-44643-hadoop-hdfs-3_4_1-tests_jar-_-any-1788186094973649142/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:33:01,487 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@71cc65b7{HTTP/1.1, (http/1.1)}{localhost:44643}
2024-12-09T16:33:01,487 INFO [Time-limited test {}] server.Server(415): Started @180653ms
2024-12-09T16:33:01,488 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T16:33:01,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:33:01,522 WARN [ResponseProcessor for block BP-985864207-172.17.0.2-1733761965604:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985864207-172.17.0.2-1733761965604:blk_1073741830_1014
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:01,522 WARN [ResponseProcessor for block BP-985864207-172.17.0.2-1733761965604:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985864207-172.17.0.2-1733761965604:blk_1073741834_1013
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:01,522 WARN [ResponseProcessor for block BP-985864207-172.17.0.2-1733761965604:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985864207-172.17.0.2-1733761965604:blk_1073741833_1015
java.io.EOFException: Unexpected EOF while trying to read response from server
	at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:01,523 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-552523611_22 at /127.0.0.1:35892 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35892 dst: /127.0.0.1:38629
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:01,523 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_822974619_22 at /127.0.0.1:35894 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35894 dst: /127.0.0.1:38629
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:01,524 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-552523611_22 at /127.0.0.1:35914 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35914 dst: /127.0.0.1:38629
java.nio.channels.ClosedChannelException: null
	at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
	at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
	at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
	at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
	at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
	at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
	at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
	at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:01,575 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ad27871{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:33:01,576 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3275e649{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T16:33:01,576 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T16:33:01,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fe94dda{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T16:33:01,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b804995{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,STOPPED} 2024-12-09T16:33:01,578 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T16:33:01,578 WARN [BP-985864207-172.17.0.2-1733761965604 heartbeating to localhost/127.0.0.1:37495 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T16:33:01,578 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T16:33:01,578 WARN [BP-985864207-172.17.0.2-1733761965604 heartbeating to localhost/127.0.0.1:37495 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985864207-172.17.0.2-1733761965604 (Datanode Uuid f013fb29-3851-41b0-b30a-5cd23a07e941) service to localhost/127.0.0.1:37495 2024-12-09T16:33:01,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data1/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:33:01,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data2/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:33:01,579 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T16:33:01,600 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:33:01,606 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:33:01,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:33:01,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:33:01,607 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T16:33:01,608 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71ca8ce1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:33:01,608 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78650317{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:33:01,704 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77a7789b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/java.io.tmpdir/jetty-localhost-37025-hadoop-hdfs-3_4_1-tests_jar-_-any-8755096014629705082/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:33:01,706 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56f019d9{HTTP/1.1, (http/1.1)}{localhost:37025} 2024-12-09T16:33:01,706 INFO [Time-limited test {}] server.Server(415): Started @180872ms 2024-12-09T16:33:01,708 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T16:33:01,890 WARN [Thread-1336 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T16:33:01,893 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc689be718aa4b77 with lease ID 0x12ab51799507c38e: from storage DS-74ddcad1-451e-4142-b42a-148128315b3b node DatanodeRegistration(127.0.0.1:44431, datanodeUuid=f7f267ed-ff2e-4163-a258-aa4a3188f4b7, infoPort=33863, infoSecurePort=0, ipcPort=46155, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:33:01,893 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfc689be718aa4b77 with lease ID 0x12ab51799507c38e: from storage DS-e651beaf-7ec1-44fd-8b4c-051c3f818348 node DatanodeRegistration(127.0.0.1:44431, datanodeUuid=f7f267ed-ff2e-4163-a258-aa4a3188f4b7, infoPort=33863, infoSecurePort=0, ipcPort=46155, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:33:02,071 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:02,109 WARN [Thread-1357 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T16:33:02,111 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x598722d52b3d4dc5 with lease ID 0x12ab51799507c38f: from storage DS-e5a10720-c573-4ca0-affc-a9872dd6d17c node DatanodeRegistration(127.0.0.1:36211, datanodeUuid=f013fb29-3851-41b0-b30a-5cd23a07e941, infoPort=42917, infoSecurePort=0, ipcPort=33369, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:33:02,112 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x598722d52b3d4dc5 with lease ID 0x12ab51799507c38f: from storage DS-dae9b003-60d2-4a8c-849c-33098f5e9641 node DatanodeRegistration(127.0.0.1:36211, datanodeUuid=f013fb29-3851-41b0-b30a-5cd23a07e941, infoPort=42917, infoSecurePort=0, ipcPort=33369, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:33:02,458 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:33:02,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:02,732 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-09T16:33:02,737 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-09T16:33:02,740 ERROR [FSHLog-0-hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e-prefix:dfce6ce5b10c,38195,1733761967572 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T16:33:02,740 WARN [FSHLog-0-hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e-prefix:dfce6ce5b10c,38195,1733761967572 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:33:02,740 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C38195%2C1733761967572:(num 1733761968561) roll requested 2024-12-09T16:33:02,741 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 2024-12-09T16:33:02,752 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 newFile=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 2024-12-09T16:33:02,753 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:02,753 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:02,753 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:02,753 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:02,753 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:02,753 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 2024-12-09T16:33:02,757 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
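The repeated "All datanodes [...] are bad. Aborting..." failures above come from the HDFS client's DataStreamer abandoning pipeline recovery when no replacement datanode can be found after the restart. A minimal sketch of the client-side knobs involved, assuming a small test cluster: the property keys below are real hdfs-default.xml settings, but the chosen values and the class itself are illustrative and not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class PipelineFailurePolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // With low replication and one datanode briefly down, the default
        // replace-datanode-on-failure policy can leave the writer with no
        // candidate node, yielding "All datanodes ... are bad. Aborting...".
        // NEVER keeps writing on the surviving pipeline instead of aborting.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        // Alternatively, keep the default policy but tolerate a failed
        // replacement attempt rather than failing the stream:
        conf.setBoolean(
            "dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        try (FileSystem fs = FileSystem.get(conf)) {
          System.out.println("fs = " + fs.getUri());
        }
      }
    }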
2024-12-09T16:33:02,757 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:33:02,757 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 2024-12-09T16:33:02,758 WARN [IPC Server handler 2 on default port 37495 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-12-09T16:33:02,759 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 after 2ms 2024-12-09T16:33:02,766 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42917:42917),(127.0.0.1/127.0.0.1:33863:33863)] 2024-12-09T16:33:02,766 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 is not closed yet, will try archiving it next time 2024-12-09T16:33:03,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:03,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36211 is added to blk_1073741833_1017 (size=1632) 2024-12-09T16:33:03,459 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:33:03,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:04,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:04,460 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:33:04,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:04,770 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-09T16:33:04,894 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T16:33:05,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:05,461 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:05,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:06,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:06,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:06,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:06,760 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 after 4003ms 2024-12-09T16:33:06,775 WARN [ResponseProcessor for block BP-985864207-172.17.0.2-1733761965604:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985864207-172.17.0.2-1733761965604:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:33:06,776 WARN [DataStreamer for file /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 block BP-985864207-172.17.0.2-1733761965604:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985864207-172.17.0.2-1733761965604:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36211,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK], DatanodeInfoWithStorage[127.0.0.1:44431,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36211,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]) is bad. 2024-12-09T16:33:06,776 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-552523611_22 at /127.0.0.1:39396 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44431:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39396 dst: /127.0.0.1:44431 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T16:33:06,776 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-552523611_22 at /127.0.0.1:37872 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36211:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37872 dst: /127.0.0.1:36211 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
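The RecoverLeaseFSUtils lines above show the WAL close path recovering the HDFS lease on the old writer file: attempt=0 gets "Lease recovery is in progress" from the NameNode, and attempt=1 succeeds roughly four seconds later. A minimal sketch of that retry pattern, assuming a reachable HDFS path on the command line: recoverLease() and isFileClosed() are real DistributedFileSystem methods (isFileClosed appears in the stack traces themselves), while the fixed one-second backoff is an assumption for illustration, not HBase's actual schedule.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Path wal = new Path(args[0]); // e.g. an hdfs://host:port/... WAL path
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(wal.toUri(), new Configuration());
        int attempt = 0;
        // recoverLease() returns false while the NameNode still reports the
        // recovery as in progress; poll until it (or a concurrent recovery,
        // observed via isFileClosed()) completes the close.
        while (!dfs.recoverLease(wal) && !dfs.isFileClosed(wal)) {
          attempt++;
          Thread.sleep(1000L); // fixed backoff, illustrative only
        }
        System.out.println("lease recovered, attempt=" + attempt);
      }
    }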
2024-12-09T16:33:06,792 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77a7789b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:33:06,792 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56f019d9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:33:06,792 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:33:06,792 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78650317{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:33:06,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71ca8ce1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,STOPPED}
2024-12-09T16:33:06,794 WARN [BP-985864207-172.17.0.2-1733761965604 heartbeating to localhost/127.0.0.1:37495 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:33:06,794 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:33:06,794 WARN [BP-985864207-172.17.0.2-1733761965604 heartbeating to localhost/127.0.0.1:37495 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985864207-172.17.0.2-1733761965604 (Datanode Uuid f013fb29-3851-41b0-b30a-5cd23a07e941) service to localhost/127.0.0.1:37495
2024-12-09T16:33:06,794 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:33:06,795 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data1/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:33:06,795 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data2/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:33:06,795 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:33:06,806 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:33:06,809 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:33:06,810 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:33:06,810 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:33:06,810 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-09T16:33:06,811 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@382189e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:33:06,811 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@280932b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:33:06,911 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25ec21ee{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/java.io.tmpdir/jetty-localhost-33083-hadoop-hdfs-3_4_1-tests_jar-_-any-9114523492398301387/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:33:06,912 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78fa9015{HTTP/1.1, (http/1.1)}{localhost:33083}
2024-12-09T16:33:06,912 INFO [Time-limited test {}] server.Server(415): Started @186077ms
2024-12-09T16:33:06,913 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T16:33:06,946 WARN [ResponseProcessor for block BP-985864207-172.17.0.2-1733761965604:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985864207-172.17.0.2-1733761965604:blk_1073741837_1018
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:06,947 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-552523611_22 at /127.0.0.1:58308 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:44431:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58308 dst: /127.0.0.1:44431
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:06,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ce5842a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:33:06,950 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@71cc65b7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:33:06,950 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:33:06,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64ee86a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:33:06,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@787dca1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,STOPPED}
2024-12-09T16:33:06,952 WARN [BP-985864207-172.17.0.2-1733761965604 heartbeating to localhost/127.0.0.1:37495 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:33:06,952 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:33:06,952 WARN [BP-985864207-172.17.0.2-1733761965604 heartbeating to localhost/127.0.0.1:37495 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985864207-172.17.0.2-1733761965604 (Datanode Uuid f7f267ed-ff2e-4163-a258-aa4a3188f4b7) service to localhost/127.0.0.1:37495
2024-12-09T16:33:06,952 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:33:06,952 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data3/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:33:06,952 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data4/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:33:06,953 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:33:06,970 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:33:06,975 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:33:06,975 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:33:06,975 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:33:06,975 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T16:33:06,976 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7544ba23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:33:06,976 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68d0885b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:33:07,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@507d2888{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/java.io.tmpdir/jetty-localhost-39425-hadoop-hdfs-3_4_1-tests_jar-_-any-3226587156131562231/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:33:07,076 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5fc4ac56{HTTP/1.1, (http/1.1)}{localhost:39425}
2024-12-09T16:33:07,076 INFO [Time-limited test {}] server.Server(415): Started @186241ms
2024-12-09T16:33:07,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:07,078 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-09T16:33:07,353 WARN [Thread-1410 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:33:07,356 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22fd32e9a13b99b3 with lease ID 0x12ab51799507c390: from storage DS-e5a10720-c573-4ca0-affc-a9872dd6d17c node DatanodeRegistration(127.0.0.1:34105, datanodeUuid=f013fb29-3851-41b0-b30a-5cd23a07e941, infoPort=37855, infoSecurePort=0, ipcPort=38869, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:33:07,357 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x22fd32e9a13b99b3 with lease ID 0x12ab51799507c390: from storage DS-dae9b003-60d2-4a8c-849c-33098f5e9641 node DatanodeRegistration(127.0.0.1:34105, datanodeUuid=f013fb29-3851-41b0-b30a-5cd23a07e941, infoPort=37855, infoSecurePort=0, ipcPort=38869, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:33:07,462 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:07,503 WARN [Thread-1430 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:33:07,506 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20022421ccf4c491 with lease ID 0x12ab51799507c391: from storage DS-74ddcad1-451e-4142-b42a-148128315b3b node DatanodeRegistration(127.0.0.1:42063, datanodeUuid=f7f267ed-ff2e-4163-a258-aa4a3188f4b7, infoPort=35247, infoSecurePort=0, ipcPort=40037, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:33:07,506 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20022421ccf4c491 with lease ID 0x12ab51799507c391: from storage DS-e651beaf-7ec1-44fd-8b4c-051c3f818348 node DatanodeRegistration(127.0.0.1:42063, datanodeUuid=f7f267ed-ff2e-4163-a258-aa4a3188f4b7, infoPort=35247, infoSecurePort=0, ipcPort=40037, storageInfo=lv=-57;cid=testClusterID;nsid=191725825;c=1733761965604), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:33:07,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:08,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:08,096 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted
2024-12-09T16:33:08,098 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004
2024-12-09T16:33:08,100 ERROR [FSHLog-0-hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e-prefix:dfce6ce5b10c,38195,1733761967572 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44431,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:08,100 WARN [FSHLog-0-hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e-prefix:dfce6ce5b10c,38195,1733761967572 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44431,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:08,100 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C38195%2C1733761967572:(num 1733761982741) roll requested
2024-12-09T16:33:08,101 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38195%2C1733761967572.1733761988100
2024-12-09T16:33:08,106 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 newFile=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761988100
2024-12-09T16:33:08,106 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:08,107 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:08,107 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:08,107 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:08,107 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:08,107 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761988100
2024-12-09T16:33:08,107 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44431,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:08,107 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:44431,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:33:08,107 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 2024-12-09T16:33:08,108 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35247:35247),(127.0.0.1/127.0.0.1:37855:37855)] 2024-12-09T16:33:08,108 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 is not closed yet, will try archiving it next time 2024-12-09T16:33:08,108 WARN [IPC Server handler 3 on default port 37495 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-09T16:33:08,108 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 after 1ms 2024-12-09T16:33:08,463 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:08,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:33:09,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:09,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:09,514 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:33:10,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:33:10,109 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 2024-12-09T16:33:10,118 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761988100 newFile=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 2024-12-09T16:33:10,118 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:10,119 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:10,119 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:10,119 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:10,119 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:10,119 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761988100 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 2024-12-09T16:33:10,120 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37855:37855),(127.0.0.1/127.0.0.1:35247:35247)] 2024-12-09T16:33:10,120 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 is not closed yet, will try archiving it next time 2024-12-09T16:33:10,120 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761988100 is not closed yet, will try archiving it next time 2024-12-09T16:33:10,121 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 2024-12-09T16:33:10,121 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 2024-12-09T16:33:10,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741838_1019 (size=1264) 2024-12-09T16:33:10,122 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 after 0ms 2024-12-09T16:33:10,122 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 2024-12-09T16:33:10,122 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741838_1019 (size=1264) 2024-12-09T16:33:10,122 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 is not closed yet, will try archiving it next time 2024-12-09T16:33:10,131 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733761969706/Put/vlen=218/seqid=0] 2024-12-09T16:33:10,132 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733761979321/Put/vlen=1045/seqid=0] 2024-12-09T16:33:10,132 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761968561 2024-12-09T16:33:10,132 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 2024-12-09T16:33:10,132 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 2024-12-09T16:33:10,132 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 after 0ms 2024-12-09T16:33:10,132 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 2024-12-09T16:33:10,136 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733761982739/Put/vlen=1045/seqid=0] 2024-12-09T16:33:10,136 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733761984772/Put/vlen=1045/seqid=0] 2024-12-09T16:33:10,136 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 2024-12-09T16:33:10,136 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761988100 2024-12-09T16:33:10,136 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761988100 2024-12-09T16:33:10,137 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761988100 after 1ms 2024-12-09T16:33:10,137 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL 
/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761988100
2024-12-09T16:33:10,140 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733761988100/Put/vlen=1045/seqid=0]
2024-12-09T16:33:10,140 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109
2024-12-09T16:33:10,140 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109
2024-12-09T16:33:10,141 WARN [IPC Server handler 2 on default port 37495 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021
2024-12-09T16:33:10,141 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 after 1ms
2024-12-09T16:33:10,356 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-09T16:33:10,464 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:10,510 WARN [ResponseProcessor for block BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:10,510 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_822974619_22 at /127.0.0.1:34130 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34105:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34130 dst: /127.0.0.1:34105
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:34105 remote=/127.0.0.1:34130]. Total timeout mills is 60000, 59608 millis timeout left.
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:10,510 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_822974619_22 at /127.0.0.1:41832 [Receiving block BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:42063:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41832 dst: /127.0.0.1:42063
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
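[Editor's sketch] The attempt=0 failure above and the attempt=1 successes later in this log (each "after 4002ms") come from the retry loop in RecoverLeaseFSUtils.recoverDFSFileLease: the client asks the NameNode to recover the WAL's lease and, until the file is closed, keeps polling. A minimal Java sketch of that pattern against the public DistributedFileSystem API follows; the 4s pause and 1s poll interval are illustrative assumptions, not the values HBase actually configures.

// Sketch of the lease-recovery retry loop reflected in the
// RecoverLeaseFSUtils(134)/(223) messages above. Pause and poll intervals
// are illustrative assumptions; HBase's real timeouts are configurable.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  public static void recover(DistributedFileSystem dfs, Path wal) throws Exception {
    long start = System.currentTimeMillis();
    for (int attempt = 0; ; attempt++) {
      // Asks the NameNode to start (or finish) lease recovery; returns true
      // once the file has been closed, as in "Recovered lease, attempt=1".
      if (dfs.recoverLease(wal)) {
        System.out.printf("Recovered lease, attempt=%d on file=%s after %dms%n",
            attempt, wal, System.currentTimeMillis() - start);
        return;
      }
      System.out.printf("Failed to recover lease, attempt=%d on file=%s after %dms%n",
          attempt, wal, System.currentTimeMillis() - start);
      // Between attempts, poll isFileClosed(); this is the reflective call that
      // throws "Filesystem closed" in the Close-WAL-Writer-0 traces once the
      // test has already shut its DFSClient down.
      long deadline = System.currentTimeMillis() + 4000L; // assumed pause
      while (System.currentTimeMillis() < deadline) {
        if (dfs.isFileClosed(wal)) {
          return;
        }
        Thread.sleep(1000L); // assumed poll interval
      }
    }
  }
}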
2024-12-09T16:33:10,510 WARN [DataStreamer for file /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 block BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34105,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK], DatanodeInfoWithStorage[127.0.0.1:42063,DS-74ddcad1-451e-4142-b42a-148128315b3b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34105,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]) is bad.
2024-12-09T16:33:10,511 WARN [DataStreamer for file /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 block BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:10,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:10,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741839_1022 (size=85)
2024-12-09T16:33:11,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:11,465 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:11,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:12,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:12,109 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761982741 after 4002ms
2024-12-09T16:33:12,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:12,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:13,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:13,466 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:13,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:14,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:14,142 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 after 4002ms
2024-12-09T16:33:14,142 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109
2024-12-09T16:33:14,146 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109
2024-12-09T16:33:14,146 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing edaaad3a2649a598b64c77a2ecec276e 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB
2024-12-09T16:33:14,147 ERROR [FSHLog-0-hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e-prefix:dfce6ce5b10c,38195,1733761967572 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
2024-12-09T16:33:14,147 WARN [FSHLog-0-hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e-prefix:dfce6ce5b10c,38195,1733761967572 {}] wal.AbstractFSWAL(2174): append entry failed
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
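[Editor's sketch] With the append failed and the old block unrecoverable from the writer's side, the WAL is abandoned and rolled, which is what the logRoller records below show. For reference, the same roll can be requested through the public client API; the connection setup is a hedged sketch, with the ServerName string taken from this log rather than from a real deployment.

// Hedged sketch: asking a regionserver to roll its active WAL via the public
// Admin API, rather than the internal AbstractWALRoller path this test hits.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // host,port,startcode exactly as server names appear in this log.
      ServerName rs = ServerName.valueOf("dfce6ce5b10c,38195,1733761967572");
      admin.rollWALWriter(rs); // requests a roll of the server's active WAL
    }
  }
}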
2024-12-09T16:33:14,147 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C38195%2C1733761967572:(num 1733761990109) roll requested
2024-12-09T16:33:14,148 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38195%2C1733761967572.1733761994148
2024-12-09T16:33:14,161 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 newFile=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761994148
2024-12-09T16:33:14,161 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:14,161 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:14,161 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:14,161 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:14,161 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:14,161 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761994148
2024-12-09T16:33:14,161 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:14,162 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-985864207-172.17.0.2-1733761965604:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:33:14,162 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 2024-12-09T16:33:14,163 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 after 1ms 2024-12-09T16:33:14,165 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 to hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/oldWALs/dfce6ce5b10c%2C38195%2C1733761967572.1733761990109 2024-12-09T16:33:14,166 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37855:37855),(127.0.0.1/127.0.0.1:35247:35247)] 2024-12-09T16:33:14,180 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/default/TestLogRolling-testLogRollOnPipelineRestart/edaaad3a2649a598b64c77a2ecec276e/.tmp/info/96eef5f87ec3452da79f36d3c129204e is 1080, key is row1002/info:/1733761979321/Put/seqid=0 2024-12-09T16:33:14,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741841_1024 (size=9270) 2024-12-09T16:33:14,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741841_1024 (size=9270) 2024-12-09T16:33:14,187 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/default/TestLogRolling-testLogRollOnPipelineRestart/edaaad3a2649a598b64c77a2ecec276e/.tmp/info/96eef5f87ec3452da79f36d3c129204e 2024-12-09T16:33:14,194 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/default/TestLogRolling-testLogRollOnPipelineRestart/edaaad3a2649a598b64c77a2ecec276e/.tmp/info/96eef5f87ec3452da79f36d3c129204e as hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/default/TestLogRolling-testLogRollOnPipelineRestart/edaaad3a2649a598b64c77a2ecec276e/info/96eef5f87ec3452da79f36d3c129204e 2024-12-09T16:33:14,201 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/default/TestLogRolling-testLogRollOnPipelineRestart/edaaad3a2649a598b64c77a2ecec276e/info/96eef5f87ec3452da79f36d3c129204e, entries=4, sequenceid=8, filesize=9.1 K 2024-12-09T16:33:14,203 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for edaaad3a2649a598b64c77a2ecec276e in 57ms, sequenceid=8, compaction requested=false 2024-12-09T16:33:14,203 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for edaaad3a2649a598b64c77a2ecec276e: 2024-12-09T16:33:14,203 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-09T16:33:14,204 ERROR [FSHLog-0-hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e-prefix:dfce6ce5b10c,38195,1733761967572.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T16:33:14,204 WARN [FSHLog-0-hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e-prefix:dfce6ce5b10c,38195,1733761967572.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T16:33:14,204 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C38195%2C1733761967572.meta:.meta(num 1733761968978) roll requested
2024-12-09T16:33:14,204 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761994204.meta
2024-12-09T16:33:14,212 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:14,212 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:14,212 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:14,212 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:14,212 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:14,213 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761968978.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761994204.meta
2024-12-09T16:33:14,213 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:14,213 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:14,213 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761968978.meta
2024-12-09T16:33:14,214 WARN [IPC Server handler 3 on default port 37495 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761968978.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013
2024-12-09T16:33:14,214 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761968978.meta after 1ms
2024-12-09T16:33:14,218 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35247:35247),(127.0.0.1/127.0.0.1:37855:37855)]
2024-12-09T16:33:14,218 DEBUG [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761968978.meta is not closed yet, will try archiving it next time
2024-12-09T16:33:14,238 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/.tmp/info/ee5d282d64f24c03acf4990551901d8b is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e./info:regioninfo/1733761969714/Put/seqid=0
2024-12-09T16:33:14,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741843_1027 (size=7125)
2024-12-09T16:33:14,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741843_1027 (size=7125)
2024-12-09T16:33:14,244 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/.tmp/info/ee5d282d64f24c03acf4990551901d8b
2024-12-09T16:33:14,273 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/.tmp/ns/6c889b9259284d5d8c5788c023d8be85 is 43, key is default/ns:d/1733761969115/Put/seqid=0
2024-12-09T16:33:14,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741844_1028 (size=5153)
2024-12-09T16:33:14,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741844_1028 (size=5153)
2024-12-09T16:33:14,280 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/.tmp/ns/6c889b9259284d5d8c5788c023d8be85
2024-12-09T16:33:14,305 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/.tmp/table/df076a9433514bf98292c6b44d53d879 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733761969739/Put/seqid=0
2024-12-09T16:33:14,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741845_1029 (size=5438)
2024-12-09T16:33:14,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741845_1029 (size=5438)
2024-12-09T16:33:14,311 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/.tmp/table/df076a9433514bf98292c6b44d53d879
2024-12-09T16:33:14,317 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/.tmp/info/ee5d282d64f24c03acf4990551901d8b as hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/info/ee5d282d64f24c03acf4990551901d8b
2024-12-09T16:33:14,325 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/info/ee5d282d64f24c03acf4990551901d8b, entries=10, sequenceid=11, filesize=7.0 K
2024-12-09T16:33:14,326 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/.tmp/ns/6c889b9259284d5d8c5788c023d8be85 as hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/ns/6c889b9259284d5d8c5788c023d8be85
2024-12-09T16:33:14,332 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/ns/6c889b9259284d5d8c5788c023d8be85, entries=2, sequenceid=11, filesize=5.0 K
2024-12-09T16:33:14,333 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/.tmp/table/df076a9433514bf98292c6b44d53d879 as hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/table/df076a9433514bf98292c6b44d53d879
2024-12-09T16:33:14,341 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/table/df076a9433514bf98292c6b44d53d879, entries=2, sequenceid=11, filesize=5.3 K
2024-12-09T16:33:14,342 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 139ms, sequenceid=11, compaction requested=false
2024-12-09T16:33:14,342 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-12-09T16:33:14,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T16:33:14,351 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T16:33:14,351 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:33:14,351 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:33:14,351 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:33:14,351 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T16:33:14,352 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T16:33:14,352 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=364509557, stopped=false
2024-12-09T16:33:14,352 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=dfce6ce5b10c,38925,1733761967402
2024-12-09T16:33:14,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T16:33:14,369 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T16:33:14,369 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:33:14,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:33:14,369 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T16:33:14,370 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T16:33:14,370 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:33:14,370 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T16:33:14,370 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:33:14,370 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:33:14,371 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'dfce6ce5b10c,38195,1733761967572' ***** 2024-12-09T16:33:14,371 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T16:33:14,371 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T16:33:14,371 INFO [RS:0;dfce6ce5b10c:38195 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T16:33:14,371 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T16:33:14,371 INFO [RS:0;dfce6ce5b10c:38195 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T16:33:14,371 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(3091): Received CLOSE for edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:33:14,371 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(959): stopping server dfce6ce5b10c,38195,1733761967572 2024-12-09T16:33:14,372 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T16:33:14,372 INFO [RS:0;dfce6ce5b10c:38195 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;dfce6ce5b10c:38195. 2024-12-09T16:33:14,372 DEBUG [RS:0;dfce6ce5b10c:38195 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T16:33:14,372 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing edaaad3a2649a598b64c77a2ecec276e, disabling compactions & flushes 2024-12-09T16:33:14,372 DEBUG [RS:0;dfce6ce5b10c:38195 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:33:14,372 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 2024-12-09T16:33:14,372 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 2024-12-09T16:33:14,372 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T16:33:14,372 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. after waiting 0 ms 2024-12-09T16:33:14,372 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T16:33:14,372 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 2024-12-09T16:33:14,372 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T16:33:14,372 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T16:33:14,372 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T16:33:14,372 DEBUG [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(1325): Online Regions={edaaad3a2649a598b64c77a2ecec276e=TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T16:33:14,372 DEBUG [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, edaaad3a2649a598b64c77a2ecec276e 2024-12-09T16:33:14,372 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T16:33:14,373 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T16:33:14,373 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T16:33:14,373 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T16:33:14,373 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T16:33:14,387 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/default/TestLogRolling-testLogRollOnPipelineRestart/edaaad3a2649a598b64c77a2ecec276e/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-09T16:33:14,388 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 2024-12-09T16:33:14,388 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for edaaad3a2649a598b64c77a2ecec276e: Waiting for close lock at 1733761994372Running coprocessor pre-close hooks at 1733761994372Disabling compacts and flushes for region at 1733761994372Disabling writes for close at 1733761994372Writing region close event to WAL at 1733761994373 (+1 ms)Running coprocessor post-close hooks at 1733761994387 (+14 ms)Closed at 1733761994387 2024-12-09T16:33:14,388 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733761969263.edaaad3a2649a598b64c77a2ecec276e. 2024-12-09T16:33:14,390 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T16:33:14,390 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T16:33:14,391 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T16:33:14,391 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733761994372Running coprocessor pre-close hooks at 1733761994372Disabling compacts and flushes for region at 1733761994372Disabling writes for close at 1733761994373 (+1 ms)Writing region close event to WAL at 1733761994386 (+13 ms)Running coprocessor post-close hooks at 1733761994390 (+4 ms)Closed at 1733761994391 (+1 ms) 2024-12-09T16:33:14,391 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T16:33:14,407 INFO [regionserver/dfce6ce5b10c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T16:33:14,409 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T16:33:14,409 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T16:33:14,467 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:14,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:14,573 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(976): stopping server dfce6ce5b10c,38195,1733761967572; all regions closed. 2024-12-09T16:33:14,573 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:14,573 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:14,574 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:14,574 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:14,574 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:33:14,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741842_1025 (size=825) 2024-12-09T16:33:14,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741842_1025 (size=825) 2024-12-09T16:33:15,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:15,468 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:15,518 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:16,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:16,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:16,509 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-09T16:33:16,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:17,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:17,368 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
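The DEBUG line above comes from a reflective probe in the test utility: it looks for a private field named threadGroup inside Hadoop's async disk service and, on Hadoop releases newer than 3.2.3/3.3.4 where that field is gone, logs the NoSuchFieldException and carries on instead of failing (HBASE-27595). A minimal sketch of that kind of guarded field lookup; the class and method names below are illustrative, and only the field name and the fallback behavior come from the log:

import java.lang.reflect.Field;

final class ThreadGroupProbe {
  /** Returns the private "threadGroup" field value, or null when this
   *  Hadoop version no longer declares the field (per HBASE-27595). */
  static Object threadGroupOf(Object asyncDiskService) {
    try {
      Field f = asyncDiskService.getClass().getDeclaredField("threadGroup");
      f.setAccessible(true);
      return f.get(asyncDiskService);
    } catch (NoSuchFieldException e) {
      // Newer Hadoop removed the field; degrade gracefully, as the log shows.
      return null;
    } catch (IllegalAccessException e) {
      throw new IllegalStateException(e);
    }
  }
}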
2024-12-09T16:33:17,469 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:17,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:18,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
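The repeated "Failed invocation" warnings come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed(Path) through reflection; once the mini-cluster's DFSClient has been shut down, every probe surfaces as an InvocationTargetException wrapping IOException("Filesystem closed"), exactly as in the traces above. A simplified sketch of such a reflective probe, with illustrative names rather than the exact HBase code:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  /** Returns true if HDFS reports the file closed, false if the probe is
   *  unavailable or failed (e.g. the DFSClient was already closed). */
  static boolean isFileClosed(FileSystem fs, Path p) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException e) {
      return false; // not HDFS, or a client too old to expose the API
    } catch (IllegalAccessException | InvocationTargetException e) {
      // The "Failed invocation" case logged at RecoverLeaseFSUtils(258):
      // here the cause is typically IOException("Filesystem closed").
      return false;
    }
  }
}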
2024-12-09T16:33:18,215 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761968978.meta after 4002ms
2024-12-09T16:33:18,215 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/WALs/dfce6ce5b10c,38195,1733761967572/dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761968978.meta to hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/oldWALs/dfce6ce5b10c%2C38195%2C1733761967572.meta.1733761968978.meta
2024-12-09T16:33:18,219 DEBUG [RS:0;dfce6ce5b10c:38195 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/oldWALs
2024-12-09T16:33:18,219 INFO [RS:0;dfce6ce5b10c:38195 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C38195%2C1733761967572.meta:.meta(num 1733761994204)
2024-12-09T16:33:18,219 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,219 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,220 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,220 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,220 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741840_1023 (size=1162)
2024-12-09T16:33:18,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741840_1023 (size=1162)
2024-12-09T16:33:18,229 DEBUG [RS:0;dfce6ce5b10c:38195 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/oldWALs
2024-12-09T16:33:18,229 INFO [RS:0;dfce6ce5b10c:38195 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C38195%2C1733761967572:(num 1733761994148)
2024-12-09T16:33:18,229 DEBUG [RS:0;dfce6ce5b10c:38195 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:33:18,229 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:33:18,229 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:33:18,229 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.ChoreService(370): Chore service for: regionserver/dfce6ce5b10c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T16:33:18,230 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:33:18,230 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:33:18,230 INFO [RS:0;dfce6ce5b10c:38195 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38195
2024-12-09T16:33:18,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T16:33:18,277 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/dfce6ce5b10c,38195,1733761967572
2024-12-09T16:33:18,278 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:33:18,286 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [dfce6ce5b10c,38195,1733761967572]
2024-12-09T16:33:18,294 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/dfce6ce5b10c,38195,1733761967572 already deleted, retry=false
2024-12-09T16:33:18,294 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; dfce6ce5b10c,38195,1733761967572 expired; onlineServers=0
2024-12-09T16:33:18,294 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'dfce6ce5b10c,38925,1733761967402' *****
2024-12-09T16:33:18,294 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T16:33:18,294 INFO [M:0;dfce6ce5b10c:38925 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T16:33:18,294 INFO [M:0;dfce6ce5b10c:38925 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:33:18,294 DEBUG [M:0;dfce6ce5b10c:38925 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T16:33:18,294 DEBUG [M:0;dfce6ce5b10c:38925 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T16:33:18,295 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761968196 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733761968196,5,FailOnTimeoutGroup]
2024-12-09T16:33:18,295 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761968196 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733761968196,5,FailOnTimeoutGroup]
2024-12-09T16:33:18,297 INFO [M:0;dfce6ce5b10c:38925 {}] hbase.ChoreService(370): Chore service for: master/dfce6ce5b10c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T16:33:18,297 INFO [M:0;dfce6ce5b10c:38925 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:33:18,297 DEBUG [M:0;dfce6ce5b10c:38925 {}] master.HMaster(1795): Stopping service threads
2024-12-09T16:33:18,297 INFO [M:0;dfce6ce5b10c:38925 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T16:33:18,297 INFO [M:0;dfce6ce5b10c:38925 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T16:33:18,297 INFO [M:0;dfce6ce5b10c:38925 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T16:33:18,297 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T16:33:18,298 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T16:33:18,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T16:33:18,302 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:33:18,303 DEBUG [M:0;dfce6ce5b10c:38925 {}] zookeeper.ZKUtil(347): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-09T16:33:18,303 WARN [M:0;dfce6ce5b10c:38925 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-09T16:33:18,304 INFO [M:0;dfce6ce5b10c:38925 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/.lastflushedseqids
2024-12-09T16:33:18,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741846_1030 (size=130)
2024-12-09T16:33:18,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741846_1030 (size=130)
2024-12-09T16:33:18,326 INFO [M:0;dfce6ce5b10c:38925 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T16:33:18,326 INFO [M:0;dfce6ce5b10c:38925 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T16:33:18,326 DEBUG [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T16:33:18,327 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:33:18,327 DEBUG [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:33:18,327 DEBUG [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T16:33:18,327 DEBUG [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:33:18,327 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB
2024-12-09T16:33:18,328 ERROR [FSHLog-0-hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData-prefix:dfce6ce5b10c,38925,1733761967402 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:18,328 WARN [FSHLog-0-hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData-prefix:dfce6ce5b10c,38925,1733761967402 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:18,328 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog dfce6ce5b10c%2C38925%2C1733761967402:(num 1733761967876) roll requested
2024-12-09T16:33:18,328 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38925%2C1733761967402.1733761998328
2024-12-09T16:33:18,344 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,344 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,344 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,344 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,344 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,345 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402/dfce6ce5b10c%2C38925%2C1733761967402.1733761998328
2024-12-09T16:33:18,345 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
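The roll above installs a fresh writer while a background "Close-WAL-Writer" thread disposes of the old one, which is why the trailer failure here (and the close failure just below) are logged as non-fatal WARNs rather than aborting the roll. A rough sketch of that hand-off pattern; the Writer interface and class below are illustrative stand-ins, not HBase's actual types:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class WalRoller {
  interface Writer { void close() throws Exception; }

  private final ExecutorService closeExecutor =
      Executors.newSingleThreadExecutor(r -> new Thread(r, "Close-WAL-Writer-0"));
  private volatile Writer current;

  void roll(Writer fresh) {
    Writer old = current;
    current = fresh;              // new appends go to the new file immediately
    if (old != null) {
      closeExecutor.execute(() -> {
        try {
          old.close();            // may fail if the pipeline is already dead
        } catch (Exception e) {
          // Logged as a WARN in the real system; the roll itself already succeeded.
          System.err.println("close old writer failed: " + e);
        }
      });
    }
  }
}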
2024-12-09T16:33:18,345 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38629,DS-e5a10720-c573-4ca0-affc-a9872dd6d17c,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-12-09T16:33:18,345 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876
2024-12-09T16:33:18,346 WARN [IPC Server handler 3 on default port 37495 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1014
2024-12-09T16:33:18,346 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876 after 1ms
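The attempt=0 failure above, together with the attempt=1 success logged earlier ("after 4002ms"), suggests a loop that asks the NameNode to recover the lease and retries until the file is reported closed. A sketch of that shape, assuming the public DistributedFileSystem.recoverLease API; the pause lengths are inferred from the logged timings, not taken from the HBase source:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecovery {
  static void recover(DistributedFileSystem dfs, Path wal)
      throws IOException, InterruptedException {
    long start = System.currentTimeMillis();
    for (int attempt = 0; ; attempt++) {
      // recoverLease returns true once the file is closed; false means the
      // NameNode answered "Lease recovery is in progress", as logged above.
      if (dfs.recoverLease(wal)) {
        System.out.printf("Recovered lease, attempt=%d after %dms%n",
            attempt, System.currentTimeMillis() - start);
        return;
      }
      System.out.printf("Failed to recover lease, attempt=%d after %dms%n",
          attempt, System.currentTimeMillis() - start);
      Thread.sleep(attempt == 0 ? 4_000 : 60_000); // pauses are illustrative
    }
  }
}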
2024-12-09T16:33:18,355 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37855:37855),(127.0.0.1/127.0.0.1:35247:35247)]
2024-12-09T16:33:18,355 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876 is not closed yet, will try archiving it next time
2024-12-09T16:33:18,378 DEBUG [M:0;dfce6ce5b10c:38925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ec26fb2ef57e4d459e23e805ac1ebc7f is 82, key is hbase:meta,,1/info:regioninfo/1733761969047/Put/seqid=0
2024-12-09T16:33:18,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741848_1033 (size=5672)
2024-12-09T16:33:18,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741848_1033 (size=5672)
2024-12-09T16:33:18,385 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ec26fb2ef57e4d459e23e805ac1ebc7f
2024-12-09T16:33:18,386 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:33:18,386 INFO [RS:0;dfce6ce5b10c:38195 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:33:18,386 INFO [RS:0;dfce6ce5b10c:38195 {}] regionserver.HRegionServer(1031): Exiting; stopping=dfce6ce5b10c,38195,1733761967572; zookeeper connection closed.
2024-12-09T16:33:18,386 DEBUG [pool-525-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38195-0x100bfc623c50001, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:33:18,386 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@dcfdfe {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@dcfdfe
2024-12-09T16:33:18,386 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-09T16:33:18,413 DEBUG [M:0;dfce6ce5b10c:38925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/45c5455aefff4e1d84315c064d7fe610 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733761969747/Put/seqid=0
2024-12-09T16:33:18,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741849_1034 (size=6118)
2024-12-09T16:33:18,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741849_1034 (size=6118)
2024-12-09T16:33:18,421 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/45c5455aefff4e1d84315c064d7fe610
2024-12-09T16:33:18,444 DEBUG [M:0;dfce6ce5b10c:38925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4849b363da324ed8983317e90aa9030f is 69, key is dfce6ce5b10c,38195,1733761967572/rs:state/1733761968325/Put/seqid=0
2024-12-09T16:33:18,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741850_1035 (size=5156)
2024-12-09T16:33:18,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741850_1035 (size=5156)
2024-12-09T16:33:18,450 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4849b363da324ed8983317e90aa9030f
2024-12-09T16:33:18,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:18,478 DEBUG [M:0;dfce6ce5b10c:38925 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/43d5f17f22dc40c8b90c1aca03913416 is 52, key is load_balancer_on/state:d/1733761969255/Put/seqid=0
2024-12-09T16:33:18,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741851_1036 (size=5056)
2024-12-09T16:33:18,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741851_1036 (size=5056)
2024-12-09T16:33:18,485 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/43d5f17f22dc40c8b90c1aca03913416
2024-12-09T16:33:18,493 DEBUG [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ec26fb2ef57e4d459e23e805ac1ebc7f as hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ec26fb2ef57e4d459e23e805ac1ebc7f
2024-12-09T16:33:18,504 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ec26fb2ef57e4d459e23e805ac1ebc7f, entries=8, sequenceid=56, filesize=5.5 K
2024-12-09T16:33:18,506 DEBUG [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/45c5455aefff4e1d84315c064d7fe610 as hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/45c5455aefff4e1d84315c064d7fe610
2024-12-09T16:33:18,522 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/45c5455aefff4e1d84315c064d7fe610, entries=6, sequenceid=56, filesize=6.0 K
2024-12-09T16:33:18,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:18,531 DEBUG [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4849b363da324ed8983317e90aa9030f as hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4849b363da324ed8983317e90aa9030f
2024-12-09T16:33:18,545 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4849b363da324ed8983317e90aa9030f, entries=1, sequenceid=56, filesize=5.0 K
2024-12-09T16:33:18,547 DEBUG [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/43d5f17f22dc40c8b90c1aca03913416 as hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/43d5f17f22dc40c8b90c1aca03913416
2024-12-09T16:33:18,553 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/43d5f17f22dc40c8b90c1aca03913416, entries=1, sequenceid=56, filesize=4.9 K
2024-12-09T16:33:18,555 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 228ms, sequenceid=56, compaction requested=false
2024-12-09T16:33:18,556 INFO [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:33:18,556 DEBUG [M:0;dfce6ce5b10c:38925 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733761998326Disabling compacts and flushes for region at 1733761998326Disabling writes for close at 1733761998327 (+1 ms)Obtaining lock to block concurrent updates at 1733761998327Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733761998327Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733761998327Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733761998356 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733761998356Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733761998378 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733761998378Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733761998392 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733761998412 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733761998412Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733761998427 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733761998443 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733761998443Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733761998457 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733761998477 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733761998477Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a1e7ea8: reopening flushed file at 1733761998492 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@a00f932: reopening flushed file at 1733761998505 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fc89bc8: reopening flushed file at 1733761998522 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@189274da: reopening flushed file at 1733761998545 (+23 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 228ms, sequenceid=56, compaction requested=false at 1733761998555 (+10 ms)Writing region close event to WAL at 1733761998556 (+1 ms)Closed at 1733761998556
2024-12-09T16:33:18,557 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,557 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,557 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,557 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,557 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:18,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42063 is added to blk_1073741847_1031 (size=757)
2024-12-09T16:33:18,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34105 is added to blk_1073741847_1031 (size=757)
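The flush sequence above is two-phase: each store first writes its new file under the region's .tmp directory ("Flushed memstore ... to=.../.tmp/info/..."), then commits it by renaming it into the store directory ("Committing ... as ..."), so a crash mid-flush never leaves a half-written file visible to readers. A condensed sketch of that pattern against a Hadoop FileSystem; the helper name and directory layout below are illustrative:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class TmpThenCommit {
  static Path commitFlush(FileSystem fs, Path storeDir, String fileName, byte[] payload)
      throws IOException {
    // Phase 1: write the whole file under .tmp, invisible to readers.
    Path tmp = new Path(new Path(storeDir, ".tmp"), fileName);
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.write(payload);   // stand-in for writing the real HFile contents
      out.hsync();          // make it durable before it becomes visible
    }
    // Phase 2: commit with a rename, which HDFS performs atomically.
    Path committed = new Path(storeDir, fileName);
    if (!fs.rename(tmp, committed)) {
      throw new IOException("Failed to commit " + tmp + " as " + committed);
    }
    return committed;
  }
}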
2024-12-09T16:33:19,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:19,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,418 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
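The burst of identical warnings above is a shutdown race: the HBase-Metrics2-1 thread keeps polling the datanode's FsDatasetImpl for gauge values after its executor map has been nulled out, so each poll trips over "this.executors" being null. An illustrative reconstruction of that race (not Hadoop's actual code), including the null-snapshot guard that would silence it:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;

final class AsyncDiskServiceMetrics {
  private volatile Map<String, ExecutorService> executors = new HashMap<>();

  void shutdown() {
    executors = null;            // frees the map while the metrics thread may still poll
  }

  int queuedTaskCount() {
    Map<String, ExecutorService> snapshot = executors;
    if (snapshot == null) {      // the guard that would avoid the logged NPE
      return 0;
    }
    return snapshot.values().size(); // the call that fails when unguarded
  }
}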
2024-12-09T16:33:19,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:19,508 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command
java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1014: GenerationStamp not matched, existing replica is blk_1073741830_1006
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?]
2024-12-09T16:33:19,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:19,922 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T16:33:19,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:19,972 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:33:19,972 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T16:33:19,972 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-09T16:33:19,972 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart
2024-12-09T16:33:20,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
    [java.lang.reflect.InvocationTargetException: null, Caused by: java.io.IOException: Filesystem closed -- stack identical to the 16:33:19,525 trace above]
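The traces above also show why every failure prints as "InvocationTargetException: null": RecoverLeaseFSUtils.isFileClosed reaches DistributedFileSystem.isFileClosed through java.lang.reflect.Method, so the real IOException rides in the wrapper's cause while the wrapper itself carries no message. A minimal sketch of that reflective-probe pattern, using only the standard Hadoop FileSystem API; the class and method names here are illustrative assumptions, not the HBase source.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  // Look up isFileClosed(Path) at runtime so the caller still compiles
  // against Hadoop versions that do not expose the method.
  public static Boolean probe(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (InvocationTargetException e) {
      // The real failure (above: IOException "Filesystem closed") rides in
      // the cause; the wrapper prints as "InvocationTargetException: null"
      // because it has no message of its own.
      System.err.println("Failed invocation: " + e.getCause());
      return null;
    } catch (ReflectiveOperationException e) {
      return null; // method missing or inaccessible on this Hadoop version
    }
  }
}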
2024-12-09T16:33:20,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
    [same stack as at 16:33:19,525]
2024-12-09T16:33:20,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
    [same stack]
2024-12-09T16:33:21,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
    [same stack]
2024-12-09T16:33:21,472 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
    [same stack]
2024-12-09T16:33:21,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
    [same stack]
2024-12-09T16:33:22,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
    [same stack]
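Each WAL file above is re-probed roughly once per second, which is consistent with a bounded retry loop around lease recovery. A minimal sketch of such a loop under that assumption; the timeout, pause, and log wording are illustrative, not HBase's actual values.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoveryLoop {
  // Retry until the lease is recovered or the file reports closed,
  // pausing between attempts.
  static boolean recover(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int attempt = 0;
    while (System.currentTimeMillis() < deadline) {
      attempt++;
      try {
        if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
          System.out.println("Recovered lease, attempt=" + attempt + " on file=" + wal);
          return true;
        }
      } catch (Exception e) {
        // Mirrors the WARN above: log the failed probe and keep trying.
        System.err.println("Failed invocation for " + wal + ": " + e);
      }
      TimeUnit.SECONDS.sleep(1); // assumed pause between probes
    }
    return false;
  }
}

The "Recovered lease, attempt=1 ... after 4005ms" INFO that follows is the success path of this kind of loop, here against the second minicluster's still-open filesystem.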
2024-12-09T16:33:22,350 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876 after 4005ms
2024-12-09T16:33:22,352 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/WALs/dfce6ce5b10c,38925,1733761967402/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876 to hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/oldWALs/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876
2024-12-09T16:33:22,360 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/MasterData/oldWALs/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876 to hdfs://localhost:37495/user/jenkins/test-data/9d9ef113-3b22-6375-9ce6-3b9b8a1f795e/oldWALs/dfce6ce5b10c%2C38925%2C1733761967402.1733761967876$masterlocalwal$
2024-12-09T16:33:22,361 INFO [M:0;dfce6ce5b10c:38925 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T16:33:22,361 INFO [M:0;dfce6ce5b10c:38925 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38925
2024-12-09T16:33:22,362 INFO [M:0;dfce6ce5b10c:38925 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:33:22,362 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:33:22,473 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
    [same stack as at 16:33:19,525]
2024-12-09T16:33:22,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
    [same stack]
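Every nested cause in these traces is the same guard: once the DFS client is closed, any further call fails fast with "Filesystem closed" (DFSClient.checkOpen sits at the top of each cause). A minimal sketch of the check-open idiom itself; this illustrates the pattern and is not the Hadoop source.

import java.io.IOException;

class ClosableClient {
  private volatile boolean clientRunning = true;

  private void checkOpen() throws IOException {
    if (!clientRunning) {
      throw new IOException("Filesystem closed");
    }
  }

  boolean isFileClosed(String src) throws IOException {
    checkOpen(); // fails fast once close() has run, as in every trace above
    // ... a real client would issue its NameNode RPC here ...
    return false;
  }

  void close() {
    clientRunning = false;
  }
}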
2024-12-09T16:33:22,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:33:22,577 INFO [M:0;dfce6ce5b10c:38925 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:33:22,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38925-0x100bfc623c50000, quorum=127.0.0.1:59507, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:33:22,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@507d2888{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:33:22,581 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5fc4ac56{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:33:22,581 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:33:22,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68d0885b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:33:22,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7544ba23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,STOPPED}
2024-12-09T16:33:22,583 WARN [BP-985864207-172.17.0.2-1733761965604 heartbeating to localhost/127.0.0.1:37495 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:33:22,583 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:33:22,583 WARN [BP-985864207-172.17.0.2-1733761965604 heartbeating to localhost/127.0.0.1:37495 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985864207-172.17.0.2-1733761965604 (Datanode Uuid f7f267ed-ff2e-4163-a258-aa4a3188f4b7) service to localhost/127.0.0.1:37495
2024-12-09T16:33:22,583 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:33:22,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data3/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:33:22,585 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data4/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:33:22,585 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:33:22,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25ec21ee{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:33:22,619 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78fa9015{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:33:22,620 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:33:22,620 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@280932b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:33:22,620 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@382189e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,STOPPED}
2024-12-09T16:33:22,622 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
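The refreshUsed WARNs above come from periodic disk-usage refresher threads whose sleep is interrupted at shutdown; interruption is their stop signal, so the WARN is expected teardown noise. A minimal sketch of that pattern (an illustrative class, not Hadoop's CachingGetSpaceUsed):

final class DiskUsageRefresher extends Thread {
  private final long intervalMs;

  DiskUsageRefresher(long intervalMs) {
    this.intervalMs = intervalMs;
    setDaemon(true);
  }

  @Override
  public void run() {
    while (true) {
      try {
        Thread.sleep(intervalMs);
        refresh();
      } catch (InterruptedException e) {
        // Shutdown interrupts the sleep; log once and exit, which is
        // exactly what the WARNs above record ("sleep interrupted").
        System.err.println("Thread Interrupted waiting to refresh disk information: " + e.getMessage());
        return;
      }
    }
  }

  private void refresh() {
    // recompute the cached disk-usage figure here
  }
}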
2024-12-09T16:33:22,622 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:33:22,622 WARN [BP-985864207-172.17.0.2-1733761965604 heartbeating to localhost/127.0.0.1:37495 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:33:22,622 WARN [BP-985864207-172.17.0.2-1733761965604 heartbeating to localhost/127.0.0.1:37495 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-985864207-172.17.0.2-1733761965604 (Datanode Uuid f013fb29-3851-41b0-b30a-5cd23a07e941) service to localhost/127.0.0.1:37495
2024-12-09T16:33:22,623 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data1/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:33:22,623 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/cluster_89c688b7-6653-d246-c2bd-933622a33f0f/data/data2/current/BP-985864207-172.17.0.2-1733761965604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:33:22,623 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:33:22,630 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1411600e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T16:33:22,630 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64c51d47{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:33:22,630 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:33:22,631 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b0ad14{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:33:22,631 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65a49e29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir/,STOPPED}
2024-12-09T16:33:22,639 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T16:33:22,670 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T16:33:22,680 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=184 (was 155)
Potentially hanging threads, grouped below by identical stacks:
  - nioEventLoopGroup-30-{1,2,3}, -31-{1,2,3}, -32-{1,2,3}, -33-{1,2,3} (12 threads), all in:
        java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
        app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
        app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
        app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
  - IPC Client (1805219993) connection to localhost/127.0.0.1:37495 from jenkins (x2) and from jenkins.hfs.4 (x1), all in:
        java.base@17.0.11/java.lang.Object.wait(Native Method)
        app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
        app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
  - IPC Parameter Sending Thread for localhost/127.0.0.1:37495 (x3), all in:
        java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
        java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
        app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
        java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
  - RPCClient-NioEventLoopGroup-4-{12,13,14,15} (4 threads), all in:
        java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
  - HMaster-EventLoopGroup-10-{1,2,3} and MiniHBaseClusterRegionServer-EventLoopGroup-11-{1,2,3} (6 threads), all in:
        app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
        app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
        app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
        app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
        app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
        app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
  - LeaseRenewer:jenkins@localhost:37495 and LeaseRenewer:jenkins.hfs.4@localhost:37495, both in:
        java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
        app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
        app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
        java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
  - Timer for 'DataNode' metrics system, in:
        java.base@17.0.11/java.lang.Object.wait(Native Method)
        java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
        java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
- Thread LEAK? -, OpenFileDescriptor=457 (was 436) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=448 (was 518), ProcessCount=11 (was 11), AvailableMemoryMB=4962 (was 6759)
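The ResourceChecker summary above is a before/after diff of process resources: 184 threads after the test versus 155 before, with each surviving thread dumped as "potentially hanging". A minimal sketch of the thread half of such a check, using only java.lang APIs; this illustrates the idea and is not HBase's ResourceChecker.

import java.util.HashSet;
import java.util.Set;

final class ThreadLeakCheck {
  static Set<Thread> snapshot() {
    // All live threads, the same source a full thread dump draws on.
    return new HashSet<>(Thread.getAllStackTraces().keySet());
  }

  static void report(Set<Thread> before) {
    Set<Thread> after = snapshot();
    System.out.printf("Thread=%d (was %d)%n", after.size(), before.size());
    after.removeAll(before); // threads that survived the test but not the baseline
    for (Thread t : after) {
      StackTraceElement[] stack = t.getStackTrace();
      System.out.println("Potentially hanging thread: " + t.getName()
          + (stack.length > 0 ? " at " + stack[0] : ""));
    }
  }
}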
2024-12-09T16:33:22,690 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=184, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=448, ProcessCount=11, AvailableMemoryMB=4962
2024-12-09T16:33:22,690 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T16:33:22,690 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.log.dir so I do NOT create it in target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d
2024-12-09T16:33:22,690 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3826dc59-1de2-7a89-f330-4f17c1dd951a/hadoop.tmp.dir so I do NOT create it in target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d
2024-12-09T16:33:22,690 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e, deleteOnExit=true
2024-12-09T16:33:22,690 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-12-09T16:33:22,691-22,692 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting each of the following to the same-named path under /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/ in system properties and HBase conf: test.cache.data, hadoop.tmp.dir, hadoop.log.dir, mapreduce.cluster.local.dir, mapreduce.cluster.temp.dir, yarn.node-labels.fs-store.root-dir, yarn.node-attribute.fs-store.root-dir, yarn.nodemanager.log-dirs, yarn.nodemanager.remote-app-log-dir (set twice), yarn.timeline-service.entity-group-fs-store.active-dir, yarn.timeline-service.entity-group-fs-store.done-dir, dfs.journalnode.edits.dir (set twice), dfs.datanode.shared.file.descriptor.paths, nfs.dump.dir, java.io.tmpdir, dfs.provided.aliasmap.inmemory.leveldb.dir, fs.s3a.committer.staging.tmp.path
2024-12-09T16:33:22,691 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-12-09T16:33:22,691 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-09T16:33:22,708 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T16:33:23,029 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:33:23,036 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-09T16:33:23,078 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-09T16:33:23,079 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-09T16:33:23,079 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-09T16:33:23,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
    [same stack as at 16:33:19,525]
2024-12-09T16:33:23,089 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-09T16:33:23,095 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d17bbc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/hadoop.log.dir/,AVAILABLE}
2024-12-09T16:33:23,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e7fd6e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-09T16:33:23,207 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6e87766c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/java.io.tmpdir/jetty-localhost-42363-hadoop-hdfs-3_4_1-tests_jar-_-any-8857517486577705979/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T16:33:23,208 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@594a0329{HTTP/1.1, (http/1.1)}{localhost:42363}
2024-12-09T16:33:23,208 INFO [Time-limited test {}] server.Server(415): Started @202374ms
2024-12-09T16:33:23,228 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000.
2024-12-09T16:33:23,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:23,513 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:33:23,521 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:33:23,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:23,530 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:33:23,530 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:33:23,530 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T16:33:23,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dd2af12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:33:23,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@766bbc26{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:33:23,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@31ad985a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/java.io.tmpdir/jetty-localhost-37337-hadoop-hdfs-3_4_1-tests_jar-_-any-12459339941287371539/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:33:23,642 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37c97193{HTTP/1.1, (http/1.1)}{localhost:37337} 2024-12-09T16:33:23,642 INFO [Time-limited test {}] server.Server(415): Started @202808ms 2024-12-09T16:33:23,643 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T16:33:23,719 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:33:23,729 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:33:23,734 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:33:23,734 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:33:23,735 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T16:33:23,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14a397e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:33:23,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c6bc9f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:33:23,866 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@570a7045{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/java.io.tmpdir/jetty-localhost-45577-hadoop-hdfs-3_4_1-tests_jar-_-any-12127515730806821524/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:33:23,866 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@36d948b4{HTTP/1.1, (http/1.1)}{localhost:45577} 2024-12-09T16:33:23,867 INFO [Time-limited test {}] server.Server(415): Started @203032ms 2024-12-09T16:33:23,868 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T16:33:24,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:24,391 WARN [Thread-1650 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e/data/data2/current/BP-2412976-172.17.0.2-1733762002720/current, will proceed with Du for space computation calculation, 2024-12-09T16:33:24,391 WARN [Thread-1649 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e/data/data1/current/BP-2412976-172.17.0.2-1733762002720/current, will proceed with Du for space computation calculation, 2024-12-09T16:33:24,413 WARN [Thread-1614 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T16:33:24,417 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2a08d52c292c9176 with lease ID 0x52b0069c065ebf8a: Processing first storage report for DS-598a3b67-e5e1-4d4b-b97d-3b550d3a644e from datanode DatanodeRegistration(127.0.0.1:43725, datanodeUuid=478ecbb2-78ef-4970-9929-344297393ed3, infoPort=37783, infoSecurePort=0, ipcPort=36885, storageInfo=lv=-57;cid=testClusterID;nsid=559375341;c=1733762002720) 2024-12-09T16:33:24,417 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a08d52c292c9176 with lease ID 0x52b0069c065ebf8a: from storage DS-598a3b67-e5e1-4d4b-b97d-3b550d3a644e node DatanodeRegistration(127.0.0.1:43725, datanodeUuid=478ecbb2-78ef-4970-9929-344297393ed3, infoPort=37783, infoSecurePort=0, ipcPort=36885, storageInfo=lv=-57;cid=testClusterID;nsid=559375341;c=1733762002720), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:33:24,417 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2a08d52c292c9176 with lease ID 0x52b0069c065ebf8a: Processing first storage report for DS-165db6db-27a0-495a-bbf2-f9a5d381e48e from datanode DatanodeRegistration(127.0.0.1:43725, datanodeUuid=478ecbb2-78ef-4970-9929-344297393ed3, infoPort=37783, infoSecurePort=0, ipcPort=36885, storageInfo=lv=-57;cid=testClusterID;nsid=559375341;c=1733762002720) 2024-12-09T16:33:24,417 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a08d52c292c9176 with lease ID 0x52b0069c065ebf8a: from storage DS-165db6db-27a0-495a-bbf2-f9a5d381e48e node DatanodeRegistration(127.0.0.1:43725, datanodeUuid=478ecbb2-78ef-4970-9929-344297393ed3, infoPort=37783, infoSecurePort=0, ipcPort=36885, storageInfo=lv=-57;cid=testClusterID;nsid=559375341;c=1733762002720), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:33:24,474 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:24,529 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:33:24,658 WARN [Thread-1661 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e/data/data3/current/BP-2412976-172.17.0.2-1733762002720/current, will proceed with Du for space computation calculation, 2024-12-09T16:33:24,663 WARN [Thread-1662 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e/data/data4/current/BP-2412976-172.17.0.2-1733762002720/current, will proceed with Du for space computation calculation, 2024-12-09T16:33:24,687 WARN [Thread-1637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T16:33:24,690 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x909d986c7e022a76 with lease ID 0x52b0069c065ebf8b: Processing first storage report for DS-8de2198e-3dd6-40ed-8df9-76d7d87b9af2 from datanode DatanodeRegistration(127.0.0.1:45183, datanodeUuid=e44e6410-c0d8-430f-bb34-915d73b562a1, infoPort=34149, infoSecurePort=0, ipcPort=39021, storageInfo=lv=-57;cid=testClusterID;nsid=559375341;c=1733762002720) 2024-12-09T16:33:24,690 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x909d986c7e022a76 with lease ID 0x52b0069c065ebf8b: from storage DS-8de2198e-3dd6-40ed-8df9-76d7d87b9af2 node DatanodeRegistration(127.0.0.1:45183, datanodeUuid=e44e6410-c0d8-430f-bb34-915d73b562a1, infoPort=34149, infoSecurePort=0, ipcPort=39021, storageInfo=lv=-57;cid=testClusterID;nsid=559375341;c=1733762002720), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:33:24,690 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x909d986c7e022a76 with lease ID 0x52b0069c065ebf8b: Processing first storage report for DS-8b12c193-d0b3-4982-8133-9fa10a7b309e from datanode DatanodeRegistration(127.0.0.1:45183, datanodeUuid=e44e6410-c0d8-430f-bb34-915d73b562a1, infoPort=34149, infoSecurePort=0, ipcPort=39021, storageInfo=lv=-57;cid=testClusterID;nsid=559375341;c=1733762002720) 2024-12-09T16:33:24,690 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x909d986c7e022a76 with lease ID 0x52b0069c065ebf8b: from storage DS-8b12c193-d0b3-4982-8133-9fa10a7b309e node DatanodeRegistration(127.0.0.1:45183, datanodeUuid=e44e6410-c0d8-430f-bb34-915d73b562a1, infoPort=34149, infoSecurePort=0, ipcPort=39021, storageInfo=lv=-57;cid=testClusterID;nsid=559375341;c=1733762002720), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:33:24,725 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d 2024-12-09T16:33:24,728 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e/zookeeper_0, clientPort=60314, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T16:33:24,728 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60314 2024-12-09T16:33:24,729 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:33:24,730 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:33:24,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741825_1001 (size=7) 2024-12-09T16:33:24,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741825_1001 (size=7) 2024-12-09T16:33:24,742 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4 with version=8 2024-12-09T16:33:24,743 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/hbase-staging 2024-12-09T16:33:24,757 INFO [Time-limited test {}] client.ConnectionUtils(128): master/dfce6ce5b10c:0 server-side Connection retries=45 2024-12-09T16:33:24,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:33:24,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T16:33:24,758 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T16:33:24,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:33:24,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T16:33:24,759 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T16:33:24,759 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T16:33:24,770 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36223 2024-12-09T16:33:24,772 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36223 connecting to ZooKeeper ensemble=127.0.0.1:60314 2024-12-09T16:33:24,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:362230x0, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T16:33:24,858 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36223-0x100bfc6b5b10000 connected 2024-12-09T16:33:24,923 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:33:24,924 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:33:24,926 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:33:24,927 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4, hbase.cluster.distributed=false 2024-12-09T16:33:24,929 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T16:33:24,929 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36223 2024-12-09T16:33:24,929 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36223 2024-12-09T16:33:24,930 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36223 2024-12-09T16:33:24,930 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36223 2024-12-09T16:33:24,930 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36223 2024-12-09T16:33:24,946 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/dfce6ce5b10c:0 server-side Connection retries=45 2024-12-09T16:33:24,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:33:24,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T16:33:24,946 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T16:33:24,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:33:24,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T16:33:24,947 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T16:33:24,947 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T16:33:24,948 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38447 2024-12-09T16:33:24,949 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38447 connecting to ZooKeeper ensemble=127.0.0.1:60314 2024-12-09T16:33:24,951 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:33:24,953 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:33:24,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:384470x0, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T16:33:24,995 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38447-0x100bfc6b5b10001 connected 2024-12-09T16:33:25,002 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:33:25,003 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T16:33:25,014 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T16:33:25,015 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T16:33:25,017 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T16:33:25,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38447 2024-12-09T16:33:25,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38447 2024-12-09T16:33:25,022 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38447 2024-12-09T16:33:25,034 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38447 2024-12-09T16:33:25,042 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38447 
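(The master on port 36223 and the region server on port 38447 brought up in the entries above are the processes created by the minicluster start logged at the top of this section. For orientation, a minimal sketch — not taken from this test, class and builder names assume the HBase 3.x test API whose output appears in the log — of starting a cluster with the same shape as the logged StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the options printed by HBaseTestingUtil(805) above.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option); // starts mini DFS, ZK, master, region server
    try {
      // test body would run against util.getConnection() here
    } finally {
      util.shutdownMiniCluster(); // tears down cluster; test dirs deleteOnExit
    }
  }
}
)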
2024-12-09T16:33:25,058 DEBUG [M:0;dfce6ce5b10c:36223 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;dfce6ce5b10c:36223 2024-12-09T16:33:25,062 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/dfce6ce5b10c,36223,1733762004745 2024-12-09T16:33:25,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:33:25,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:33:25,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:33:25,267 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/dfce6ce5b10c,36223,1733762004745 2024-12-09T16:33:25,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T16:33:25,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:33:25,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:33:25,312 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T16:33:25,312 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/dfce6ce5b10c,36223,1733762004745 from backup master directory 2024-12-09T16:33:25,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/dfce6ce5b10c,36223,1733762004745 2024-12-09T16:33:25,403 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
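(The recurring Close-WAL-Writer-0 warnings in this section all bottom out in the same frames: a reflective call to DistributedFileSystem#isFileClosed failing with IOException("Filesystem closed") because the WAL paths belong to the previous test's mini DFS, whose client has already been shut down. A minimal sketch, assuming only what the stack frames themselves show — variable and class names here are illustrative, not the HBase source:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  /**
   * Reflectively calls DistributedFileSystem#isFileClosed(Path), the call the
   * traces show at RecoverLeaseFSUtils.isFileClosed(...:254). The lookup is
   * reflective because isFileClosed exists only on DistributedFileSystem, not
   * on the FileSystem base class.
   */
  static boolean probe(FileSystem fs, Path wal) {
    try {
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, wal);
    } catch (InvocationTargetException e) {
      // When the underlying DFSClient was already closed, the cause is
      // IOException("Filesystem closed") and the caller logs the WARN
      // "Failed invocation for hdfs://..." seen above, then retries.
      if (e.getCause() instanceof IOException) {
        return false;
      }
      throw new RuntimeException(e);
    } catch (ReflectiveOperationException e) {
      return false; // filesystem type has no isFileClosed; treat as unknown
    }
  }
}
)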
2024-12-09T16:33:25,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:33:25,403 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=dfce6ce5b10c,36223,1733762004745 2024-12-09T16:33:25,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:33:25,419 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/hbase.id] with ID: 59f4dd43-0d73-4077-af52-f92b71e6a992 2024-12-09T16:33:25,419 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/.tmp/hbase.id 2024-12-09T16:33:25,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741826_1002 (size=42) 2024-12-09T16:33:25,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741826_1002 (size=42) 2024-12-09T16:33:25,437 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/.tmp/hbase.id]:[hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/hbase.id] 2024-12-09T16:33:25,449 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:33:25,449 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T16:33:25,451 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-12-09T16:33:25,475 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T16:33:25,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:25,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:33:25,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:33:25,478 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:33:25,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:33:25,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:33:25,479 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:33:25,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:33:25,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:33:25,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:25,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:25,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:25,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:25,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:25,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:25,508 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:25,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:25,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:25,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:33:25,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:33:25,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741827_1003 (size=196)
2024-12-09T16:33:25,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741827_1003 (size=196)
2024-12-09T16:33:25,675 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T16:33:25,676 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-09T16:33:25,678 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:33:25,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741828_1004 (size=1189)
2024-12-09T16:33:25,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741828_1004 (size=1189)
2024-12-09T16:33:25,701 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store
2024-12-09T16:33:25,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741829_1005 (size=34)
2024-12-09T16:33:25,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741829_1005 (size=34)
2024-12-09T16:33:25,715 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:33:25,715 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T16:33:25,715 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:33:25,715 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:33:25,715 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T16:33:25,715 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:33:25,715 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
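The recurring "Failed invocation ... InvocationTargetException ... Filesystem closed" warnings earlier in this stretch come from a background Close-WAL-Writer thread still probing WAL files on the previous mini-cluster (port 38821) after its DFS client was shut down. The probe is made reflectively, so the real IOException arrives wrapped in an InvocationTargetException. A minimal sketch of that call shape, assuming only the public Hadoop FileSystem API (this is not the actual RecoverLeaseFSUtils source):

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  // Reflectively ask the filesystem whether a file is closed, mirroring the
  // stack trace above. isFileClosed(Path) exists on DistributedFileSystem but
  // not on the FileSystem base class, hence the reflection.
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // client without the probe; caller falls back to waiting
    } catch (IllegalAccessException e) {
      return false;
    } catch (InvocationTargetException e) {
      // The wrapped cause is the real failure, e.g. the
      // "java.io.IOException: Filesystem closed" seen in the traces here.
      return false;
    }
  }
}
```

Because the wrapped cause is "Filesystem closed", these warnings appear to indicate a leftover worker from the torn-down cluster rather than a problem with the cluster being started against port 42743.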
2024-12-09T16:33:25,715 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733762005715Disabling compacts and flushes for region at 1733762005715Disabling writes for close at 1733762005715Writing region close event to WAL at 1733762005715Closed at 1733762005715 2024-12-09T16:33:25,716 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/.initializing 2024-12-09T16:33:25,716 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/WALs/dfce6ce5b10c,36223,1733762004745 2024-12-09T16:33:25,719 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C36223%2C1733762004745, suffix=, logDir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/WALs/dfce6ce5b10c,36223,1733762004745, archiveDir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/oldWALs, maxLogs=10 2024-12-09T16:33:25,719 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C36223%2C1733762004745.1733762005719 2024-12-09T16:33:25,731 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/WALs/dfce6ce5b10c,36223,1733762004745/dfce6ce5b10c%2C36223%2C1733762004745.1733762005719 2024-12-09T16:33:25,734 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37783:37783),(127.0.0.1/127.0.0.1:34149:34149)] 2024-12-09T16:33:25,737 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:33:25,737 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:33:25,737 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,737 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,739 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,740 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T16:33:25,741 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:25,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:33:25,741 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,742 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T16:33:25,743 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:25,743 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:33:25,743 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T16:33:25,745 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:25,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:33:25,745 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T16:33:25,747 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:25,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:33:25,747 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,748 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,749 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,751 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,751 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,753 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T16:33:25,754 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:33:25,757 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:33:25,757 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=799029, jitterRate=0.01601909101009369}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T16:33:25,758 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733762005737Initializing all the Stores at 1733762005738 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762005738Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762005739 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762005739Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762005739Cleaning up temporary data from old regions at 1733762005751 (+12 ms)Region opened successfully at 1733762005758 (+7 ms) 2024-12-09T16:33:25,758 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T16:33:25,762 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@498edfe3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0 2024-12-09T16:33:25,763 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T16:33:25,763 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T16:33:25,763 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T16:33:25,763 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T16:33:25,764 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T16:33:25,764 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T16:33:25,764 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T16:33:25,767 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T16:33:25,768 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T16:33:25,856 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T16:33:25,856 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T16:33:25,857 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T16:33:25,934 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T16:33:25,935 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T16:33:25,936 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T16:33:25,985 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T16:33:25,986 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T16:33:26,006 DEBUG 
[master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T16:33:26,009 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T16:33:26,019 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T16:33:26,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:33:26,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:33:26,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:33:26,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:33:26,031 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=dfce6ce5b10c,36223,1733762004745, sessionid=0x100bfc6b5b10000, setting cluster-up flag (Was=false) 2024-12-09T16:33:26,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:33:26,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:33:26,072 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T16:33:26,074 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,36223,1733762004745 2024-12-09T16:33:26,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:33:26,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:33:26,090 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-09T16:33:26,114 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T16:33:26,116 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,36223,1733762004745 2024-12-09T16:33:26,123 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T16:33:26,125 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T16:33:26,126 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T16:33:26,126 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T16:33:26,126 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: dfce6ce5b10c,36223,1733762004745 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T16:33:26,128 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:33:26,128 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:33:26,129 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:33:26,129 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:33:26,129 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/dfce6ce5b10c:0, corePoolSize=10, maxPoolSize=10 2024-12-09T16:33:26,129 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,129 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): 
Starting executor service name=MASTER_MERGE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:33:26,129 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,139 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:33:26,139 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T16:33:26,140 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:26,140 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T16:33:26,146 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733762036146 2024-12-09T16:33:26,146 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T16:33:26,146 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T16:33:26,146 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T16:33:26,146 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T16:33:26,146 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T16:33:26,146 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T16:33:26,152 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,152 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T16:33:26,153 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T16:33:26,153 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T16:33:26,153 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(746): ClusterId : 59f4dd43-0d73-4077-af52-f92b71e6a992 2024-12-09T16:33:26,153 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T16:33:26,158 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T16:33:26,158 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T16:33:26,163 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733762006158,5,FailOnTimeoutGroup] 2024-12-09T16:33:26,164 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733762006163,5,FailOnTimeoutGroup] 2024-12-09T16:33:26,164 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,164 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T16:33:26,164 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,164 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
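The "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines above record periodic tasks being handed to a ChoreService. A rough sketch of that pattern against the public ScheduledChore API, assuming the constructor taking (name, stopper, period, initialDelay, unit); the chore name and its body are illustrative, not taken from the HBase source:

```java
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreExample {
  public static void main(String[] args) {
    // A chore needs a Stoppable so it can stand down with its owner.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("example");
    // Fires every 600000 ms, the same period as the LogsCleaner chore above.
    service.scheduleChore(new ScheduledChore("LogsCleaner-like", stopper,
        600000, 0, TimeUnit.MILLISECONDS) {
      @Override protected void chore() {
        // periodic cleanup work would go here
      }
    });
  }
}
```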
2024-12-09T16:33:26,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741831_1007 (size=1321) 2024-12-09T16:33:26,166 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T16:33:26,166 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T16:33:26,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741831_1007 (size=1321) 2024-12-09T16:33:26,167 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T16:33:26,167 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4 2024-12-09T16:33:26,174 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T16:33:26,175 DEBUG [RS:0;dfce6ce5b10c:38447 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57bfbe53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0 2024-12-09T16:33:26,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741832_1008 (size=32) 2024-12-09T16:33:26,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741832_1008 (size=32) 2024-12-09T16:33:26,200 
DEBUG [RS:0;dfce6ce5b10c:38447 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;dfce6ce5b10c:38447 2024-12-09T16:33:26,200 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T16:33:26,200 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T16:33:26,200 DEBUG [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T16:33:26,201 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(2659): reportForDuty to master=dfce6ce5b10c,36223,1733762004745 with port=38447, startcode=1733762004946 2024-12-09T16:33:26,201 DEBUG [RS:0;dfce6ce5b10c:38447 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T16:33:26,207 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42199, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T16:33:26,208 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36223 {}] master.ServerManager(363): Checking decommissioned status of RegionServer dfce6ce5b10c,38447,1733762004946 2024-12-09T16:33:26,208 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36223 {}] master.ServerManager(517): Registering regionserver=dfce6ce5b10c,38447,1733762004946 2024-12-09T16:33:26,210 DEBUG [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4 2024-12-09T16:33:26,211 DEBUG [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42743 2024-12-09T16:33:26,211 DEBUG [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T16:33:26,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T16:33:26,221 DEBUG [RS:0;dfce6ce5b10c:38447 {}] zookeeper.ZKUtil(111): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/dfce6ce5b10c,38447,1733762004946 2024-12-09T16:33:26,221 WARN [RS:0;dfce6ce5b10c:38447 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
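The registration visible in the surrounding lines works through an ephemeral znode: the regionserver creates /hbase/rs/&lt;host&gt;,&lt;port&gt;,&lt;startcode&gt;, and the master's RegionServerTracker reacts to the resulting NodeChildrenChanged event. A bare-ZooKeeper sketch of that handshake; HBase actually goes through its ZKUtil/ZKWatcher wrappers, and the quorum address and paths below are simply copied from the log:

```java
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.Watcher;

public final class RsRegistrationSketch {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent e) -> System.out.println("ZK event: " + e);
    // Real code waits for the session's connected event before proceeding.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60314", 30000, watcher);
    // Ephemeral: the node vanishes when the session dies, which is how the
    // master notices a crashed regionserver. Assumes /hbase/rs already exists.
    zk.create("/hbase/rs/example-host,38447,1733762004946", new byte[0],
        ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    // Master side: re-arm a children watch; a NodeChildrenChanged event fires
    // whenever a server registers or disappears.
    zk.getChildren("/hbase/rs", true);
  }
}
```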
2024-12-09T16:33:26,221 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [dfce6ce5b10c,38447,1733762004946] 2024-12-09T16:33:26,221 INFO [RS:0;dfce6ce5b10c:38447 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T16:33:26,221 DEBUG [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946 2024-12-09T16:33:26,230 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T16:33:26,232 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T16:33:26,234 INFO [RS:0;dfce6ce5b10c:38447 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T16:33:26,234 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,254 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T16:33:26,262 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T16:33:26,262 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/dfce6ce5b10c:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:33:26,263 DEBUG [RS:0;dfce6ce5b10c:38447 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:33:26,265 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,266 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,266 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,266 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,266 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,266 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,38447,1733762004946-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:33:26,289 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T16:33:26,290 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,38447,1733762004946-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,290 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:26,290 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.Replication(171): dfce6ce5b10c,38447,1733762004946 started 2024-12-09T16:33:26,312 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
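Each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line above is a dedicated bounded pool for one event type. An approximation with plain java.util.concurrent; HBase's own executor.ExecutorService wrapper is more elaborate, and this only mirrors the sizing shown in the log:

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public final class EventPools {
  static ThreadPoolExecutor newPool(String name, int core, int max) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(core, max,
        60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-worker"));
    pool.allowCoreThreadTimeOut(true); // let idle event threads die off
    return pool;
  }

  public static void main(String[] args) {
    // Mirrors two of the pools in the log: one thread for region opens,
    // three for flush operations.
    ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1, 1);
    ThreadPoolExecutor flushOps = newPool("RS_FLUSH_OPERATIONS", 3, 3);
    openRegion.execute(() -> System.out.println("open region task"));
    openRegion.shutdown();
    flushOps.shutdown();
  }
}
```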
2024-12-09T16:33:26,313 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(1482): Serving as dfce6ce5b10c,38447,1733762004946, RpcServer on dfce6ce5b10c/172.17.0.2:38447, sessionid=0x100bfc6b5b10001 2024-12-09T16:33:26,313 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T16:33:26,313 DEBUG [RS:0;dfce6ce5b10c:38447 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager dfce6ce5b10c,38447,1733762004946 2024-12-09T16:33:26,313 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,38447,1733762004946' 2024-12-09T16:33:26,313 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T16:33:26,314 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T16:33:26,314 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T16:33:26,314 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T16:33:26,314 DEBUG [RS:0;dfce6ce5b10c:38447 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager dfce6ce5b10c,38447,1733762004946 2024-12-09T16:33:26,314 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,38447,1733762004946' 2024-12-09T16:33:26,314 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T16:33:26,315 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T16:33:26,315 DEBUG [RS:0;dfce6ce5b10c:38447 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T16:33:26,316 INFO [RS:0;dfce6ce5b10c:38447 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T16:33:26,316 INFO [RS:0;dfce6ce5b10c:38447 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
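The WAL configuration line that follows ("blocksize=256 MB, rollsize=128 MB") reflects the relationship that the roll size is the WAL block size scaled by a multiplier, evidently 0.5 in this build, so a writer rolls to a new file before its current block fills. A sketch of that arithmetic; the config keys are the standard HBase ones, but the default handling here is simplified and should be treated as illustrative:

```java
import org.apache.hadoop.conf.Configuration;

public final class WalRollSize {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // 256 MB fallback matches the value in the log line below; HBase itself
    // derives the default from the underlying HDFS block size.
    long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize",
        256L * 1024 * 1024);
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    long rollSize = (long) (blockSize * multiplier);
    System.out.println("rollsize=" + (rollSize >> 20) + " MB"); // 128 MB
  }
}
```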
2024-12-09T16:33:26,419 INFO [RS:0;dfce6ce5b10c:38447 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C38447%2C1733762004946, suffix=, logDir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946, archiveDir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/oldWALs, maxLogs=32
2024-12-09T16:33:26,420 INFO [RS:0;dfce6ce5b10c:38447 {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38447%2C1733762004946.1733762006420
2024-12-09T16:33:26,447 INFO [RS:0;dfce6ce5b10c:38447 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946/dfce6ce5b10c%2C38447%2C1733762004946.1733762006420
2024-12-09T16:33:26,466 DEBUG [RS:0;dfce6ce5b10c:38447 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34149:34149),(127.0.0.1/127.0.0.1:37783:37783)]
2024-12-09T16:33:26,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more
2024-12-09T16:33:26,530 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-09T16:33:26,578 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:33:26,581 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T16:33:26,584 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T16:33:26,584 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:26,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:33:26,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T16:33:26,588 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T16:33:26,588 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:26,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:33:26,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T16:33:26,592 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T16:33:26,592 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:26,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:33:26,593 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T16:33:26,595 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T16:33:26,596 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:26,596 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:33:26,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T16:33:26,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740 2024-12-09T16:33:26,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740 2024-12-09T16:33:26,600 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal 
replay for 1588230740 2024-12-09T16:33:26,600 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T16:33:26,601 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T16:33:26,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T16:33:26,607 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:33:26,607 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=871896, jitterRate=0.10867376625537872}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T16:33:26,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733762006578Initializing all the Stores at 1733762006579 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762006579Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762006581 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762006581Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762006581Cleaning up temporary data from old regions at 1733762006600 (+19 ms)Region opened successfully at 1733762006608 (+8 ms) 2024-12-09T16:33:26,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T16:33:26,608 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T16:33:26,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T16:33:26,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T16:33:26,609 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-12-09T16:33:26,626 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T16:33:26,626 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733762006608Disabling compacts and flushes for region at 1733762006608Disabling writes for close at 1733762006609 (+1 ms)Writing region close event to WAL at 1733762006626 (+17 ms)Closed at 1733762006626 2024-12-09T16:33:26,628 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:33:26,628 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T16:33:26,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T16:33:26,631 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T16:33:26,633 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T16:33:26,784 DEBUG [dfce6ce5b10c:36223 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T16:33:26,784 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=dfce6ce5b10c,38447,1733762004946 2024-12-09T16:33:26,786 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,38447,1733762004946, state=OPENING 2024-12-09T16:33:26,820 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T16:33:26,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:33:26,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:33:26,832 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:33:26,832 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T16:33:26,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,38447,1733762004946}] 2024-12-09T16:33:26,834 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED
2024-12-09T16:33:26,988 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-12-09T16:33:26,990 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40289, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-12-09T16:33:26,994 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-12-09T16:33:26,994 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:33:26,997 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C38447%2C1733762004946.meta, suffix=.meta, logDir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946, archiveDir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/oldWALs, maxLogs=32
2024-12-09T16:33:26,997 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38447%2C1733762004946.meta.1733762006997.meta
2024-12-09T16:33:27,059 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946/dfce6ce5b10c%2C38447%2C1733762004946.meta.1733762006997.meta
2024-12-09T16:33:27,091 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:27,099 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34149:34149),(127.0.0.1/127.0.0.1:37783:37783)]
2024-12-09T16:33:27,103 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}
2024-12-09T16:33:27,104 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911
2024-12-09T16:33:27,104 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService
2024-12-09T16:33:27,104 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully.
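The repeated RecoverLeaseFSUtils(258) WARNs in this log come from HBase invoking DFSClient.isFileClosed through reflection (hence the InvocationTargetException wrapper in every trace) against a DFSClient that an earlier mini-cluster already shut down, which surfaces as the benign "Filesystem closed" cause. A minimal sketch of that probe loop, using the real DistributedFileSystem.recoverLease/isFileClosed APIs directly; the retry budget and sleep are illustrative assumptions, not HBase's actual policy:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoveryProbe {
  // Tries to reclaim the HDFS write lease on an old WAL so it can be read/split.
  public static boolean recoverLease(FileSystem fs, Path walFile) throws InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // non-HDFS filesystems have no lease to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    for (int attempt = 0; attempt < 5; attempt++) { // retry budget: assumption
      try {
        if (dfs.recoverLease(walFile) || dfs.isFileClosed(walFile)) {
          return true; // lease released; file is safe to consume
        }
      } catch (IOException e) {
        // "Filesystem closed" means the DFSClient was shut down under us,
        // exactly the WARN seen above; give up rather than spin.
        return false;
      }
      Thread.sleep(1000L); // pause before re-asking the NameNode
    }
    return false;
  }
}
```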
2024-12-09T16:33:27,104 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T16:33:27,104 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:33:27,104 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T16:33:27,105 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T16:33:27,112 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T16:33:27,115 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T16:33:27,115 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:27,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:33:27,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T16:33:27,118 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T16:33:27,118 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:27,119 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:33:27,120 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T16:33:27,121 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T16:33:27,121 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:27,122 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:33:27,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T16:33:27,124 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T16:33:27,125 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:27,127 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
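Every CompactionConfiguration(183) record above prints the same per-column-family tuning. As a hedged cross-check sketch: the following real hbase-site keys drive those logged values, and the fallback defaults shown here are simply the numbers this run printed (ratio 1.200000, 3..10 files, throttle point 2684354560); a plain Configuration without hbase-default.xml loaded would otherwise return them from the fallbacks:

```java
import org.apache.hadoop.conf.Configuration;

public final class CompactionConfigDump {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Keys are real HBase configuration names; defaults mirror this log run.
    System.out.printf("minFilesToCompact=%d%n", conf.getInt("hbase.hstore.compaction.min", 3));
    System.out.printf("maxFilesToCompact=%d%n", conf.getInt("hbase.hstore.compaction.max", 10));
    System.out.printf("ratio=%.6f%n", conf.getFloat("hbase.hstore.compaction.ratio", 1.2F));
    System.out.printf("offPeakRatio=%.6f%n",
        conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F));
    System.out.printf("throttlePoint=%d%n",
        conf.getLong("hbase.regionserver.thread.compaction.throttle", 2684354560L));
  }
}
```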
2024-12-09T16:33:27,127 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T16:33:27,128 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740 2024-12-09T16:33:27,130 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740 2024-12-09T16:33:27,133 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T16:33:27,133 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T16:33:27,134 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T16:33:27,136 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T16:33:27,137 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=797174, jitterRate=0.013659745454788208}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T16:33:27,138 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T16:33:27,138 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733762007105Writing region info on filesystem at 1733762007105Initializing all the Stores at 1733762007107 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762007107Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762007111 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762007111Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762007111Cleaning up temporary data from old regions at 1733762007133 (+22 ms)Running coprocessor post-open hooks at 1733762007138 (+5 ms)Region opened successfully at 1733762007138 2024-12-09T16:33:27,140 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733762006987 2024-12-09T16:33:27,145 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T16:33:27,145 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T16:33:27,146 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=dfce6ce5b10c,38447,1733762004946 2024-12-09T16:33:27,148 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,38447,1733762004946, state=OPEN 2024-12-09T16:33:27,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:33:27,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:33:27,222 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,38447,1733762004946 2024-12-09T16:33:27,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:33:27,222 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:33:27,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T16:33:27,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,38447,1733762004946 in 390 msec 2024-12-09T16:33:27,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T16:33:27,239 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-12-09T16:33:27,241 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:33:27,241 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T16:33:27,243 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:33:27,244 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,38447,1733762004946, seqNum=-1] 2024-12-09T16:33:27,244 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:33:27,247 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45183, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:33:27,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1300 sec 2024-12-09T16:33:27,258 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733762007257, completionTime=-1 2024-12-09T16:33:27,258 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T16:33:27,258 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T16:33:27,261 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T16:33:27,262 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733762067262 2024-12-09T16:33:27,262 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733762127262 2024-12-09T16:33:27,262 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-09T16:33:27,262 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,36223,1733762004745-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:27,262 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,36223,1733762004745-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:27,262 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,36223,1733762004745-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:27,263 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-dfce6ce5b10c:36223, period=300000, unit=MILLISECONDS is enabled. 
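The InitMetaProcedure(114) record earlier in this stretch ("Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces") is the bootstrap equivalent of a client namespace call. A minimal sketch using the real NamespaceDescriptor/Admin APIs; the Connection is assumed to already point at this cluster:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class NamespaceSetup {
  // Creates a namespace the same way InitMetaProcedure seeds 'default' and 'hbase'.
  public static void createNamespace(Connection conn, String name) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.createNamespace(NamespaceDescriptor.create(name).build());
    }
  }
}
```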
2024-12-09T16:33:27,263 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:27,263 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T16:33:27,267 DEBUG [master/dfce6ce5b10c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T16:33:27,269 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.866sec 2024-12-09T16:33:27,270 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T16:33:27,270 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T16:33:27,270 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T16:33:27,270 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T16:33:27,270 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T16:33:27,270 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,36223,1733762004745-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:33:27,270 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,36223,1733762004745-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T16:33:27,279 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T16:33:27,280 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T16:33:27,280 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,36223,1733762004745-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
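Each "Chore ScheduledChore name=..., period=..., unit=... is enabled." record above is ChoreService(168) accepting a ScheduledChore at master startup. A minimal chore of the same shape, assuming the public ScheduledChore/ChoreService/Stoppable APIs; the chore name and 60-second period are made up for illustration:

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class DemoChore extends ScheduledChore {
  DemoChore(Stoppable stopper) {
    super("DemoChore", stopper, 60_000); // period in ms, like BalancerChore et al.
  }

  @Override
  protected void chore() {
    System.out.println("chore tick"); // the periodic work goes here
  }

  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(new DemoChore(stopper)); // logs the "is enabled." line
    service.shutdown(); // a real server keeps the service for its whole lifetime
  }
}
```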
2024-12-09T16:33:27,353 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2caf2313, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:33:27,353 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request dfce6ce5b10c,36223,-1 for getting cluster id 2024-12-09T16:33:27,353 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T16:33:27,357 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '59f4dd43-0d73-4077-af52-f92b71e6a992' 2024-12-09T16:33:27,357 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T16:33:27,358 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "59f4dd43-0d73-4077-af52-f92b71e6a992" 2024-12-09T16:33:27,358 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@603bde42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:33:27,358 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [dfce6ce5b10c,36223,-1] 2024-12-09T16:33:27,359 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T16:33:27,359 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:33:27,361 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34098, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T16:33:27,363 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fef2b0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:33:27,364 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:33:27,366 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,38447,1733762004946, seqNum=-1] 2024-12-09T16:33:27,367 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:33:27,369 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57984, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:33:27,371 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=dfce6ce5b10c,36223,1733762004745 2024-12-09T16:33:27,372 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:33:27,376 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T16:33:27,376 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T16:33:27,378 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is dfce6ce5b10c,36223,1733762004745 2024-12-09T16:33:27,378 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2c7b122c 2024-12-09T16:33:27,378 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T16:33:27,380 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34108, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T16:33:27,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T16:33:27,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-09T16:33:27,381 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T16:33:27,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T16:33:27,385 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T16:33:27,385 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:33:27,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-09T16:33:27,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T16:33:27,388 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T16:33:27,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741835_1011 (size=405) 2024-12-09T16:33:27,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741835_1011 (size=405) 2024-12-09T16:33:27,419 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0679d032e7164c492eb55b7afc1f1438, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4 2024-12-09T16:33:27,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741836_1012 (size=88) 2024-12-09T16:33:27,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741836_1012 (size=88) 2024-12-09T16:33:27,455 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:33:27,455 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 0679d032e7164c492eb55b7afc1f1438, disabling compactions & flushes 2024-12-09T16:33:27,455 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438. 2024-12-09T16:33:27,455 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438. 2024-12-09T16:33:27,455 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438. after waiting 0 ms 2024-12-09T16:33:27,455 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438. 
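The HMaster$4(2454) "create 'TestLogRolling-testCompactionRecordDoesntBlockRolling'" record and the pid=4 CreateTableProcedure around it correspond to a client call like the sketch below. The column-family values mirror what the master echoes (VERSIONS 1, BLOOMFILTER ROW, 64 KB blocks), and the deliberately tiny table-level sizes are the test settings that triggered the two TableDescriptorChecker WARNs; treat this as a reconstruction, not the test's actual source:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestTable {
  public static void create(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setMaxFileSize(786432)      // tiny on purpose: provokes the MAX_FILESIZE WARN
          .setMemStoreFlushSize(8192)  // tiny on purpose: provokes the MEMSTORE_FLUSHSIZE WARN
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(65536)               // BLOCKSIZE => '65536 B (64KB)'
              .build())
          .build()); // master stores the pid=4 CreateTableProcedure, as logged
    }
  }
}
```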
2024-12-09T16:33:27,455 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:33:27,455 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0679d032e7164c492eb55b7afc1f1438: Waiting for close lock at 1733762007455Disabling compacts and flushes for region at 1733762007455Disabling writes for close at 1733762007455Writing region close event to WAL at 1733762007455Closed at 1733762007455
2024-12-09T16:33:27,457 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META
2024-12-09T16:33:27,457 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733762007457"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733762007457"}]},"ts":"1733762007457"}
2024-12-09T16:33:27,461 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
2024-12-09T16:33:27,463 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-09T16:33:27,464 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733762007463"}]},"ts":"1733762007463"}
2024-12-09T16:33:27,467 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta
2024-12-09T16:33:27,468 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0679d032e7164c492eb55b7afc1f1438, ASSIGN}]
2024-12-09T16:33:27,470 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0679d032e7164c492eb55b7afc1f1438, ASSIGN
2024-12-09T16:33:27,472 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0679d032e7164c492eb55b7afc1f1438, ASSIGN; state=OFFLINE, location=dfce6ce5b10c,38447,1733762004946; forceNewPlan=false, retain=false
2024-12-09T16:33:27,476 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:27,531 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:27,623 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0679d032e7164c492eb55b7afc1f1438, regionState=OPENING, regionLocation=dfce6ce5b10c,38447,1733762004946
2024-12-09T16:33:27,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0679d032e7164c492eb55b7afc1f1438, ASSIGN because future has completed
2024-12-09T16:33:27,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0679d032e7164c492eb55b7afc1f1438, server=dfce6ce5b10c,38447,1733762004946}]
2024-12-09T16:33:27,788 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:33:27,788 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0679d032e7164c492eb55b7afc1f1438, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.', STARTKEY => '', ENDKEY => ''}
2024-12-09T16:33:27,789 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 0679d032e7164c492eb55b7afc1f1438
2024-12-09T16:33:27,789 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:33:27,789 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0679d032e7164c492eb55b7afc1f1438
2024-12-09T16:33:27,789 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0679d032e7164c492eb55b7afc1f1438
2024-12-09T16:33:27,791 INFO [StoreOpener-0679d032e7164c492eb55b7afc1f1438-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0679d032e7164c492eb55b7afc1f1438
2024-12-09T16:33:27,794 INFO [StoreOpener-0679d032e7164c492eb55b7afc1f1438-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0679d032e7164c492eb55b7afc1f1438 columnFamilyName info
2024-12-09T16:33:27,794 DEBUG [StoreOpener-0679d032e7164c492eb55b7afc1f1438-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:33:27,795 INFO [StoreOpener-0679d032e7164c492eb55b7afc1f1438-1 {}] regionserver.HStore(327): Store=0679d032e7164c492eb55b7afc1f1438/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T16:33:27,795 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0679d032e7164c492eb55b7afc1f1438
2024-12-09T16:33:27,797 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438
2024-12-09T16:33:27,798 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438
2024-12-09T16:33:27,799 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0679d032e7164c492eb55b7afc1f1438
2024-12-09T16:33:27,799 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0679d032e7164c492eb55b7afc1f1438
2024-12-09T16:33:27,801 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0679d032e7164c492eb55b7afc1f1438
2024-12-09T16:33:27,804 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T16:33:27,805 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0679d032e7164c492eb55b7afc1f1438; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=780972, jitterRate=-0.006943374872207642}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-09T16:33:27,806 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0679d032e7164c492eb55b7afc1f1438
2024-12-09T16:33:27,806 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0679d032e7164c492eb55b7afc1f1438: Running coprocessor pre-open hook at 1733762007789Writing region info on filesystem at 1733762007789Initializing all the Stores at 1733762007791 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762007791Cleaning up temporary data from old regions at 1733762007799 (+8 ms)Running coprocessor post-open hooks at 1733762007806 (+7 ms)Region opened successfully at 1733762007806
2024-12-09T16:33:27,808 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438., pid=6, masterSystemTime=1733762007782
2024-12-09T16:33:27,812 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:33:27,812 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:33:27,819 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0679d032e7164c492eb55b7afc1f1438, regionState=OPEN, openSeqNum=2, regionLocation=dfce6ce5b10c,38447,1733762004946
2024-12-09T16:33:27,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0679d032e7164c492eb55b7afc1f1438, server=dfce6ce5b10c,38447,1733762004946 because future has completed
2024-12-09T16:33:27,842 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-12-09T16:33:27,842 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0679d032e7164c492eb55b7afc1f1438, ASSIGN in 367 msec
2024-12-09T16:33:27,842 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-12-09T16:33:27,843 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0679d032e7164c492eb55b7afc1f1438, server=dfce6ce5b10c,38447,1733762004946 in 201 msec
2024-12-09T16:33:27,844 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-12-09T16:33:27,845 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733762007844"}]},"ts":"1733762007844"}
2024-12-09T16:33:27,849 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta
2024-12-09T16:33:27,851 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION
2024-12-09T16:33:27,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 470 msec
2024-12-09T16:33:28,092 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:33:28,477 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:33:28,532 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:33:29,093 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:33:29,478 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:33:29,533 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:33:29,638 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T16:33:29,638 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-09T16:33:29,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:33:29,639 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-09T16:33:29,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-09T16:33:29,639 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-09T16:33:29,640 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T16:33:29,640 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-12-09T16:33:30,094 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:33:30,479 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:33:30,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:33:31,095 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:33:31,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:33:31,534 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:33:32,096 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:33:32,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 after 68054ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:32,480 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:33:32,535 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:33:32,606 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T16:33:32,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:33:32,639 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-09T16:33:32,640 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling'
2024-12-09T16:33:33,097 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:33:33,481 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
[The identical WARN + stack trace repeats at roughly one-second intervals from 16:33:33,481 through 16:33:37,100 as Close-WAL-Writer-0 retries lease recovery on all three WAL files: dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 (MasterData), dfce6ce5b10c%2C34107%2C1733761915656.1733761916051, and dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta. Duplicate traces omitted.]
2024-12-09T16:33:37,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T16:33:37,452 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T16:33:37,452 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, for max=2147483647 with caching=100
2024-12-09T16:33:37,457 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T16:33:37,457 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:33:37,462 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438., hostname=dfce6ce5b10c,38447,1733762004946, seqNum=2]
2024-12-09T16:33:37,472 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T16:33:37,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T16:33:37,482 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T16:33:37,484 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
[16:33:37,484 WARN: duplicate lease-recovery trace for dfce6ce5b10c%2C34107%2C1733761915656.1733761916051, identical to the one above; omitted.]
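The entries above are the master-side view of a table flush: the client RPC (Client=jenkins//172.17.0.2 flush ...) is stored as FlushTableProcedure pid=7, which steps through FLUSH_TABLE_PREPARE and FLUSH_TABLE_FLUSH_REGIONS while the caller polls "Checking to see if procedure is done". A plausible client-side counterpart using the standard HBase Admin API; the connection setup is a hypothetical sketch, not taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush: returns once the master-side FlushTableProcedure
      // (pid=7 above) and its per-region subprocedures have completed.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}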
2024-12-09T16:33:37,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T16:33:37,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
[16:33:37,542 WARN: duplicate lease-recovery trace for dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta, identical to the one above; omitted.]
2024-12-09T16:33:37,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38447 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-12-09T16:33:37,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:33:37,658 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 0679d032e7164c492eb55b7afc1f1438 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T16:33:37,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/cf70c38e7094454290a59f759a18b77a is 1080, key is row0001/info:/1733762017464/Put/seqid=0
2024-12-09T16:33:37,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741837_1013 (size=6033)
2024-12-09T16:33:37,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741837_1013 (size=6033)
2024-12-09T16:33:37,736 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/cf70c38e7094454290a59f759a18b77a
2024-12-09T16:33:37,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/cf70c38e7094454290a59f759a18b77a as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/cf70c38e7094454290a59f759a18b77a
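The region-server side follows a write-to-temp-then-commit pattern: the memstore is serialized into an HFile under the region's .tmp directory, HDFS acknowledges the finished blocks (addStoredBlock, size=6033), and the file is then moved into the info column-family directory ("Committing ... as ..."). A schematic of that commit step under plain FileSystem.rename semantics; the helper and its path layout are an illustrative sketch, not the actual HRegionFileSystem code:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class CommitFlushedHFile {
  // Write under .tmp, then rename into the family directory so that readers
  // only ever observe complete HFiles (rename is atomic on HDFS).
  static Path commit(FileSystem fs, Path regionDir, String family, String file)
      throws IOException {
    Path tmp = new Path(regionDir, ".tmp/" + family + "/" + file);
    Path dst = new Path(regionDir, family + "/" + file);
    if (!fs.rename(tmp, dst)) {
      throw new IOException("Failed to commit " + tmp + " as " + dst);
    }
    return dst;
  }
}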
2024-12-09T16:33:37,760 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/cf70c38e7094454290a59f759a18b77a, entries=1, sequenceid=5, filesize=5.9 K
2024-12-09T16:33:37,762 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0679d032e7164c492eb55b7afc1f1438 in 109ms, sequenceid=5, compaction requested=false
2024-12-09T16:33:37,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 0679d032e7164c492eb55b7afc1f1438:
2024-12-09T16:33:37,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:33:37,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-12-09T16:33:37,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-12-09T16:33:37,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-12-09T16:33:37,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 282 msec
2024-12-09T16:33:37,778 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 301 msec
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:38,485 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:33:38,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:39,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:39,486 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:33:39,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:40,102 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:40,487 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
[The "Failed invocation" WARN above, with an identical InvocationTargetException / "Filesystem closed" stack trace, repeats roughly once per second for each of the three open WAL files (dfce6ce5b10c%2C34107%2C1733761915656.1733761916051, dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta, and the MasterData WAL dfce6ce5b10c%2C39507%2C1733761912458.1733761913658) from 2024-12-09T16:33:40,544 through 2024-12-09T16:33:47,550; only the two attempt=2 warnings below interrupt the cycle.]
2024-12-09T16:33:42,489 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 after 68061ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:42,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta after 68064ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:33:47,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-09T16:33:47,561 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T16:33:47,564 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T16:33:47,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T16:33:47,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-09T16:33:47,567 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T16:33:47,568 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T16:33:47,568 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T16:33:47,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38447 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-12-09T16:33:47,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:33:47,723 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 0679d032e7164c492eb55b7afc1f1438 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T16:33:47,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/0404491d607e45768b8a667134417d9c is 1080, key is row0002/info:/1733762027563/Put/seqid=0
2024-12-09T16:33:47,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741838_1014 (size=6033)
2024-12-09T16:33:47,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741838_1014 (size=6033)
2024-12-09T16:33:47,733 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/0404491d607e45768b8a667134417d9c
2024-12-09T16:33:47,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/0404491d607e45768b8a667134417d9c as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/0404491d607e45768b8a667134417d9c
2024-12-09T16:33:47,746 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/0404491d607e45768b8a667134417d9c, entries=1, sequenceid=9, filesize=5.9 K
2024-12-09T16:33:47,747 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0679d032e7164c492eb55b7afc1f1438 in 24ms, sequenceid=9, compaction requested=false
2024-12-09T16:33:47,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 0679d032e7164c492eb55b7afc1f1438:
2024-12-09T16:33:47,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:33:47,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-12-09T16:33:47,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-12-09T16:33:47,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-12-09T16:33:47,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec
2024-12-09T16:33:47,755 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec
2024-12-09T16:33:48,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:33:48,494 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
[stack trace identical to the 16:33:48,109 WARN above; omitted]
2024-12-09T16:33:48,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
[stack trace identical; omitted]
[the same three WARNs, each with an identical InvocationTargetException / "Filesystem closed" stack trace, recur about once per second per file: the MasterData WAL (...1733761913658) at 16:33:49,113, :50,114, :51,115, :52,118, :53,118 and :54,119; the ...1733761916051 WAL at 16:33:49,494, :50,495, :51,496, :52,497, :53,498 and :54,499; the ...meta WAL at 16:33:49,551, :50,552, :51,553, :52,554, :53,555 and :54,556; duplicate traces omitted]
2024-12-09T16:33:54,724 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
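[The WARN cycle above, which continues below, is WAL lease recovery outliving the test's HDFS mini-cluster: RecoverLeaseFSUtils re-polls each file roughly once a second, calling DistributedFileSystem.isFileClosed reflectively, and because the DFSClient is already shut down every poll throws IOException("Filesystem closed"), which reflection wraps in the InvocationTargetException seen in each trace. A minimal, hypothetical Java sketch of that reflective call pattern, not HBase's actual code:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedSketch {
      // isFileClosed(Path) exists on DistributedFileSystem but not on the
      // generic FileSystem API, hence the runtime lookup.
      static boolean isFileClosed(FileSystem fs, Path p) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (InvocationTargetException e) {
          // The target's IOException ("Filesystem closed" once the client is
          // shut down) surfaces here as the cause -- the pairing in the WARNs.
          return false;
        } catch (ReflectiveOperationException e) {
          return false; // method absent or inaccessible on this FileSystem
        }
      }
    }
]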
[the identical WARN/stack-trace cycle continues for the same three files at 16:33:55,120 (MasterData WAL), :55,499 (...1733761916051), :55,556 (...meta), :56,120, :56,501 and :56,558; duplicate traces omitted]
2024-12-09T16:33:57,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:57,502 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:57,562 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
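The trace is worth a gloss: RecoverLeaseFSUtils polls DistributedFileSystem.isFileClosed through reflection (hence the GeneratedMethodAccessor/Method.invoke frames), and the wrapped IOException("Filesystem closed") means the mini-cluster's DFSClient has already been shut down while Close-WAL-Writer-0 is still retrying lease recovery, so every poll fails the same way. A minimal sketch of such a reflective poll follows; the class and method names here are illustrative (this is not HBase's exact code):

```java
import java.lang.reflect.Method;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class IsFileClosedPoller {
  private final Method isFileClosedMethod; // null if the client predates isFileClosed

  IsFileClosedPoller(DistributedFileSystem dfs) {
    Method m = null;
    try {
      // isFileClosed lives on DistributedFileSystem, not the FileSystem base
      // class, which is why it gets resolved and invoked reflectively.
      m = dfs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      // very old client: callers fall back to plain recoverLease() polling
    }
    this.isFileClosedMethod = m;
  }

  /** Returns true once the NameNode reports the file as closed. */
  boolean isFileClosed(DistributedFileSystem dfs, Path path) {
    if (isFileClosedMethod == null) {
      return false;
    }
    try {
      return (Boolean) isFileClosedMethod.invoke(dfs, path);
    } catch (ReflectiveOperationException e) {
      // An InvocationTargetException wrapping IOException("Filesystem closed")
      // lands here when the DFSClient was shut down underneath the poll --
      // the exact WARN that RecoverLeaseFSUtils(258) logs in this run.
      return false; // treat as "not closed yet"; the caller owns the timeout
    }
  }
}
```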
2024-12-09T16:33:57,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-09T16:33:57,623 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T16:33:57,628 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38447%2C1733762004946.1733762037627
2024-12-09T16:33:57,688 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:57,688 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:57,688 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:57,688 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:57,689 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:33:57,689 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946/dfce6ce5b10c%2C38447%2C1733762004946.1733762006420 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946/dfce6ce5b10c%2C38447%2C1733762004946.1733762037627
2024-12-09T16:33:57,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741833_1009 (size=5546)
2024-12-09T16:33:57,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741833_1009 (size=5546)
2024-12-09T16:33:57,728 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34149:34149),(127.0.0.1/127.0.0.1:37783:37783)]
2024-12-09T16:33:57,729 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T16:33:57,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T16:33:57,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-09T16:33:57,733 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T16:33:57,735 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T16:33:57,736 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T16:33:57,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38447 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-09T16:33:57,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:33:57,895 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 0679d032e7164c492eb55b7afc1f1438 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T16:33:57,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/6c550886808843fb983f68990d7d49e6 is 1080, key is row0003/info:/1733762037625/Put/seqid=0
2024-12-09T16:33:57,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741840_1016 (size=6033)
2024-12-09T16:33:57,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741840_1016 (size=6033)
2024-12-09T16:33:57,937 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/6c550886808843fb983f68990d7d49e6
2024-12-09T16:33:57,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/6c550886808843fb983f68990d7d49e6 as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/6c550886808843fb983f68990d7d49e6
2024-12-09T16:33:57,962 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/6c550886808843fb983f68990d7d49e6, entries=1, sequenceid=13, filesize=5.9 K
2024-12-09T16:33:57,966 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0679d032e7164c492eb55b7afc1f1438 in 71ms, sequenceid=13, compaction requested=true
2024-12-09T16:33:57,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 0679d032e7164c492eb55b7afc1f1438:
2024-12-09T16:33:57,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:33:57,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-12-09T16:33:57,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-12-09T16:33:57,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-12-09T16:33:57,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 234 msec
2024-12-09T16:33:57,979 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 246 msec
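The flush just recorded (pid=11 fanning out to a per-region pid=12, finishing in 246 msec) is the procedure a client-side admin flush kicks off; the RawAsyncHBaseAdmin line above shows the async admin in use. A minimal sketch of issuing such a flush, assuming a reachable cluster with the default configuration:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushExample {
  public static void main(String[] args) throws Exception {
    TableName table =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (AsyncConnection conn =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      // Runs as a master-side FlushTableProcedure (pid=11 in this log),
      // which fans out one FlushRegionProcedure per region (pid=12).
      conn.getAdmin().flush(table).get(); // resolves once the procedure completes
    }
  }
}
```

The returned future resolves only after the master reports the procedure finished, which is what the repeated "Checking to see if procedure is done pid=11" DEBUG polling above corresponds to.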
11 more 2024-12-09T16:33:58,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:58,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:59,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:33:59,503 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:33:59,563 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:00,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:00,504 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:00,564 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:01,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:01,505 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:01,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:02,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:02,506 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:02,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:03,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:03,507 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:03,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
2024-12-09T16:34:04,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:04,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:04,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:05,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:05,508 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:05,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:06,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:06,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:06,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:07,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:07,509 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:07,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:07,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-09T16:34:07,841 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T16:34:07,842 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T16:34:07,843 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T16:34:07,843 DEBUG [Time-limited test {}] regionserver.HStore(1541): 0679d032e7164c492eb55b7afc1f1438/info is initiating minor compaction (all files)
2024-12-09T16:34:07,843 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-12-09T16:34:07,843 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-12-09T16:34:07,843 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 0679d032e7164c492eb55b7afc1f1438/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:34:07,843 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/cf70c38e7094454290a59f759a18b77a, hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/0404491d607e45768b8a667134417d9c, hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/6c550886808843fb983f68990d7d49e6] into tmpdir=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp, totalSize=17.7 K
2024-12-09T16:34:07,844 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting cf70c38e7094454290a59f759a18b77a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733762017464
2024-12-09T16:34:07,844 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0404491d607e45768b8a667134417d9c, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733762027563
2024-12-09T16:34:07,844 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 6c550886808843fb983f68990d7d49e6, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733762037625
2024-12-09T16:34:07,856 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 0679d032e7164c492eb55b7afc1f1438#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T16:34:07,857 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/e2ce95c29cd048c69996168ab8f3b4e5 is 1080, key is row0001/info:/1733762017464/Put/seqid=0
2024-12-09T16:34:07,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741841_1017 (size=8296)
2024-12-09T16:34:07,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741841_1017 (size=8296)
2024-12-09T16:34:07,871 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/e2ce95c29cd048c69996168ab8f3b4e5 as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/e2ce95c29cd048c69996168ab8f3b4e5
2024-12-09T16:34:07,879 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0679d032e7164c492eb55b7afc1f1438/info of 0679d032e7164c492eb55b7afc1f1438 into e2ce95c29cd048c69996168ab8f3b4e5(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T16:34:07,879 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 0679d032e7164c492eb55b7afc1f1438:
2024-12-09T16:34:07,882 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38447%2C1733762004946.1733762047882
2024-12-09T16:34:07,888 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:07,888 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:07,888 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:07,888 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:07,888 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:07,888 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946/dfce6ce5b10c%2C38447%2C1733762004946.1733762037627 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946/dfce6ce5b10c%2C38447%2C1733762004946.1733762047882
2024-12-09T16:34:07,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741839_1015 (size=2520)
2024-12-09T16:34:07,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741839_1015 (size=2520)
2024-12-09T16:34:07,901 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946/dfce6ce5b10c%2C38447%2C1733762004946.1733762006420 to hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/oldWALs/dfce6ce5b10c%2C38447%2C1733762004946.1733762006420
2024-12-09T16:34:07,901 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34149:34149),(127.0.0.1/127.0.0.1:37783:37783)]
2024-12-09T16:34:07,902 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T16:34:07,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T16:34:07,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-09T16:34:07,904 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T16:34:07,905 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T16:34:07,906 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T16:34:08,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38447 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-12-09T16:34:08,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:34:08,059 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 0679d032e7164c492eb55b7afc1f1438 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T16:34:08,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/882c4bfda6b24c548785b449ac7e8071 is 1080, key is row0000/info:/1733762047880/Put/seqid=0
2024-12-09T16:34:08,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741843_1019 (size=6033)
2024-12-09T16:34:08,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741843_1019 (size=6033)
2024-12-09T16:34:08,070 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/882c4bfda6b24c548785b449ac7e8071
2024-12-09T16:34:08,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/882c4bfda6b24c548785b449ac7e8071 as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/882c4bfda6b24c548785b449ac7e8071
2024-12-09T16:34:08,083 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/882c4bfda6b24c548785b449ac7e8071, entries=1, sequenceid=18, filesize=5.9 K
2024-12-09T16:34:08,084 INFO [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0679d032e7164c492eb55b7afc1f1438 in 25ms, sequenceid=18, compaction requested=false
2024-12-09T16:34:08,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 0679d032e7164c492eb55b7afc1f1438:
2024-12-09T16:34:08,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:34:08,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-12-09T16:34:08,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-12-09T16:34:08,086 INFO [master/dfce6ce5b10c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-12-09T16:34:08,086 INFO [master/dfce6ce5b10c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-12-09T16:34:08,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-12-09T16:34:08,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec
2024-12-09T16:34:08,091 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 188 msec
2024-12-09T16:34:08,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:08,510 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:08,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:09,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:09,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:09,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-12-09T16:34:09,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:10,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:10,511 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:10,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:11,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:11,512 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:11,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:12,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:12,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:12,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:12,789 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0679d032e7164c492eb55b7afc1f1438, had cached 0 bytes from a total of 14329
2024-12-09T16:34:13,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:13,513 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:13,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:14,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:14,515 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:14,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:15,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:15,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:15,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:16,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:16,516 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:16,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:17,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:17,517 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:17,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:18,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36223 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-09T16:34:18,010 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T16:34:18,020 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C38447%2C1733762004946.1733762058020
2024-12-09T16:34:18,056 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,056 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,056 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,056 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,056 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,057 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946/dfce6ce5b10c%2C38447%2C1733762004946.1733762047882 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946/dfce6ce5b10c%2C38447%2C1733762004946.1733762058020
2024-12-09T16:34:18,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741842_1018 (size=2026)
2024-12-09T16:34:18,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741842_1018 (size=2026)
2024-12-09T16:34:18,062 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/WALs/dfce6ce5b10c,38447,1733762004946/dfce6ce5b10c%2C38447%2C1733762004946.1733762037627 to hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/oldWALs/dfce6ce5b10c%2C38447%2C1733762004946.1733762037627
2024-12-09T16:34:18,114 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34149:34149),(127.0.0.1/127.0.0.1:37783:37783)]
2024-12-09T16:34:18,114 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T16:34:18,114 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T16:34:18,114 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T16:34:18,114 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:34:18,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:34:18,115 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T16:34:18,115 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T16:34:18,115 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=821008576, stopped=false 2024-12-09T16:34:18,115 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=dfce6ce5b10c,36223,1733762004745 2024-12-09T16:34:18,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:18,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T16:34:18,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:18,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T16:34:18,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:18,165 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T16:34:18,166 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T16:34:18,166 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T16:34:18,166 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:34:18,166 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'dfce6ce5b10c,38447,1733762004946' ***** 2024-12-09T16:34:18,166 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T16:34:18,166 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:34:18,166 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T16:34:18,166 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T16:34:18,166 INFO [RS:0;dfce6ce5b10c:38447 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T16:34:18,167 INFO [RS:0;dfce6ce5b10c:38447 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T16:34:18,167 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(3091): Received CLOSE for 0679d032e7164c492eb55b7afc1f1438 2024-12-09T16:34:18,167 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(959): stopping server dfce6ce5b10c,38447,1733762004946 2024-12-09T16:34:18,167 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T16:34:18,167 INFO [RS:0;dfce6ce5b10c:38447 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;dfce6ce5b10c:38447. 
2024-12-09T16:34:18,168 DEBUG [RS:0;dfce6ce5b10c:38447 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:34:18,168 DEBUG [RS:0;dfce6ce5b10c:38447 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:34:18,168 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T16:34:18,168 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T16:34:18,168 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T16:34:18,168 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T16:34:18,168 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0679d032e7164c492eb55b7afc1f1438, disabling compactions & flushes
2024-12-09T16:34:18,168 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:34:18,169 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:34:18,169 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438. after waiting 0 ms
2024-12-09T16:34:18,169 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:34:18,169 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 0679d032e7164c492eb55b7afc1f1438 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T16:34:18,170 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:34:18,178 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close
2024-12-09T16:34:18,178 DEBUG [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 0679d032e7164c492eb55b7afc1f1438=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.}
2024-12-09T16:34:18,178 DEBUG [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(1351): Waiting on 0679d032e7164c492eb55b7afc1f1438, 1588230740
2024-12-09T16:34:18,178 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T16:34:18,179 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T16:34:18,179 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T16:34:18,179 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T16:34:18,179 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T16:34:18,179 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB
2024-12-09T16:34:18,192 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/cddbcadadb8b440ba0535bd6dd7d4e72 is 1080, key is row0001/info:/1733762058015/Put/seqid=0
2024-12-09T16:34:18,232 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/.tmp/info/c2fcf339d8204ffbab1d113b5e93bda8 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438./info:regioninfo/1733762007818/Put/seqid=0
2024-12-09T16:34:18,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741845_1021 (size=6033)
2024-12-09T16:34:18,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741845_1021 (size=6033)
2024-12-09T16:34:18,248 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/cddbcadadb8b440ba0535bd6dd7d4e72
2024-12-09T16:34:18,275 INFO [regionserver/dfce6ce5b10c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:34:18,280 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/.tmp/info/cddbcadadb8b440ba0535bd6dd7d4e72 as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/cddbcadadb8b440ba0535bd6dd7d4e72
2024-12-09T16:34:18,290 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/cddbcadadb8b440ba0535bd6dd7d4e72, entries=1, sequenceid=22, filesize=5.9 K
2024-12-09T16:34:18,298 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0679d032e7164c492eb55b7afc1f1438 in 129ms, sequenceid=22, compaction requested=true
2024-12-09T16:34:18,304 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T16:34:18,304 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T16:34:18,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741846_1022 (size=7308)
2024-12-09T16:34:18,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741846_1022 (size=7308)
2024-12-09T16:34:18,318 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/.tmp/info/c2fcf339d8204ffbab1d113b5e93bda8
2024-12-09T16:34:18,322 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/cf70c38e7094454290a59f759a18b77a, hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/0404491d607e45768b8a667134417d9c, hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/6c550886808843fb983f68990d7d49e6] to archive
2024-12-09T16:34:18,324 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-09T16:34:18,329 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/cf70c38e7094454290a59f759a18b77a to hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/cf70c38e7094454290a59f759a18b77a
2024-12-09T16:34:18,331 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/0404491d607e45768b8a667134417d9c to hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/0404491d607e45768b8a667134417d9c
2024-12-09T16:34:18,335 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/6c550886808843fb983f68990d7d49e6 to hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/info/6c550886808843fb983f68990d7d49e6
2024-12-09T16:34:18,336 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried.
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=dfce6ce5b10c:36223 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-12-09T16:34:18,337 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cf70c38e7094454290a59f759a18b77a=6033, 0404491d607e45768b8a667134417d9c=6033, 6c550886808843fb983f68990d7d49e6=6033]
2024-12-09T16:34:18,378 DEBUG [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(1351): Waiting on 0679d032e7164c492eb55b7afc1f1438, 1588230740
2024-12-09T16:34:18,380 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/.tmp/ns/6c5ec801b02644e49d099bf9b4d5caa8 is 43, key is default/ns:d/1733762007248/Put/seqid=0
2024-12-09T16:34:18,381 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0679d032e7164c492eb55b7afc1f1438/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1
2024-12-09T16:34:18,384 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:34:18,384 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0679d032e7164c492eb55b7afc1f1438: Waiting for close lock at 1733762058168Running coprocessor pre-close hooks at 1733762058168Disabling compacts and flushes for region at 1733762058168Disabling writes for close at 1733762058169 (+1 ms)Obtaining lock to block concurrent updates at 1733762058169Preparing flush snapshotting stores in 0679d032e7164c492eb55b7afc1f1438 at 1733762058169Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733762058169Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438. at 1733762058171 (+2 ms)Flushing 0679d032e7164c492eb55b7afc1f1438/info: creating writer at 1733762058171Flushing 0679d032e7164c492eb55b7afc1f1438/info: appending metadata at 1733762058191 (+20 ms)Flushing 0679d032e7164c492eb55b7afc1f1438/info: closing flushed file at 1733762058191Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7abf8eb8: reopening flushed file at 1733762058279 (+88 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0679d032e7164c492eb55b7afc1f1438 in 129ms, sequenceid=22, compaction requested=true at 1733762058298 (+19 ms)Writing region close event to WAL at 1733762058354 (+56 ms)Running coprocessor post-close hooks at 1733762058383 (+29 ms)Closed at 1733762058384 (+1 ms)
2024-12-09T16:34:18,384 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733762007380.0679d032e7164c492eb55b7afc1f1438.
2024-12-09T16:34:18,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741847_1023 (size=5153)
2024-12-09T16:34:18,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741847_1023 (size=5153)
2024-12-09T16:34:18,409 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/.tmp/ns/6c5ec801b02644e49d099bf9b4d5caa8
2024-12-09T16:34:18,454 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/.tmp/table/6baea12119ad46dc814d4b69a93e2469 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733762007844/Put/seqid=0
2024-12-09T16:34:18,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741848_1024 (size=5508)
2024-12-09T16:34:18,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741848_1024 (size=5508)
2024-12-09T16:34:18,461 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/.tmp/table/6baea12119ad46dc814d4b69a93e2469
2024-12-09T16:34:18,469 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/.tmp/info/c2fcf339d8204ffbab1d113b5e93bda8 as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/info/c2fcf339d8204ffbab1d113b5e93bda8
2024-12-09T16:34:18,481 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/info/c2fcf339d8204ffbab1d113b5e93bda8, entries=10, sequenceid=11, filesize=7.1 K
2024-12-09T16:34:18,484 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/.tmp/ns/6c5ec801b02644e49d099bf9b4d5caa8 as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/ns/6c5ec801b02644e49d099bf9b4d5caa8
2024-12-09T16:34:18,492 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/ns/6c5ec801b02644e49d099bf9b4d5caa8, entries=2, sequenceid=11, filesize=5.0 K
2024-12-09T16:34:18,493 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/.tmp/table/6baea12119ad46dc814d4b69a93e2469 as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/table/6baea12119ad46dc814d4b69a93e2469
2024-12-09T16:34:18,500 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/table/6baea12119ad46dc814d4b69a93e2469, entries=2, sequenceid=11, filesize=5.4 K
2024-12-09T16:34:18,502 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 322ms, sequenceid=11, compaction requested=false
2024-12-09T16:34:18,519 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:18,530 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-09T16:34:18,532 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:34:18,532 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T16:34:18,532 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733762058178Running coprocessor pre-close hooks at 1733762058178Disabling compacts and flushes for region at 1733762058178Disabling writes for close at 1733762058179 (+1 ms)Obtaining lock to block concurrent updates at 1733762058179Preparing flush snapshotting stores in 1588230740 at 1733762058179Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733762058180 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733762058181 (+1 ms)Flushing 1588230740/info: creating writer at 1733762058181Flushing 1588230740/info: appending metadata at 1733762058231 (+50 ms)Flushing 1588230740/info: closing flushed file at 1733762058231Flushing 1588230740/ns: creating writer at 1733762058329 (+98 ms)Flushing 1588230740/ns: appending metadata at 1733762058379 (+50 ms)Flushing 1588230740/ns: closing flushed file at 1733762058379Flushing 1588230740/table: creating writer at 1733762058423 (+44 ms)Flushing 1588230740/table: appending metadata at 1733762058453 (+30 ms)Flushing 1588230740/table: closing flushed file at 1733762058453Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b479a7d: reopening flushed file at 1733762058468 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e971c81: reopening flushed file at 1733762058481 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d2adcfa: reopening flushed file at 1733762058492 (+11 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 322ms, sequenceid=11, compaction requested=false at 1733762058502 (+10 ms)Writing region close event to WAL at 1733762058520 (+18 ms)Running coprocessor post-close hooks at 1733762058532 (+12 ms)Closed at 1733762058532
2024-12-09T16:34:18,532 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T16:34:18,578 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(976): stopping server dfce6ce5b10c,38447,1733762004946; all regions closed.
2024-12-09T16:34:18,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:18,579 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,579 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,580 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,580 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,580 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741834_1010 (size=3306)
2024-12-09T16:34:18,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741834_1010 (size=3306)
2024-12-09T16:34:18,592 DEBUG [RS:0;dfce6ce5b10c:38447 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/oldWALs
2024-12-09T16:34:18,592 INFO [RS:0;dfce6ce5b10c:38447 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C38447%2C1733762004946.meta:.meta(num 1733762006997)
2024-12-09T16:34:18,593 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,593 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,593 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,593 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,593 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741844_1020 (size=1252)
2024-12-09T16:34:18,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741844_1020 (size=1252)
2024-12-09T16:34:18,603 DEBUG [RS:0;dfce6ce5b10c:38447 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/oldWALs
2024-12-09T16:34:18,603 INFO [RS:0;dfce6ce5b10c:38447 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C38447%2C1733762004946:(num 1733762058020)
2024-12-09T16:34:18,603 DEBUG [RS:0;dfce6ce5b10c:38447 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:34:18,603 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:34:18,604 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:34:18,604 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.ChoreService(370): Chore service for: regionserver/dfce6ce5b10c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-09T16:34:18,604 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:34:18,604 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:34:18,604 INFO [RS:0;dfce6ce5b10c:38447 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38447
2024-12-09T16:34:18,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/dfce6ce5b10c,38447,1733762004946
2024-12-09T16:34:18,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T16:34:18,619 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:34:18,619 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [dfce6ce5b10c,38447,1733762004946]
2024-12-09T16:34:18,637 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/dfce6ce5b10c,38447,1733762004946 already deleted, retry=false
2024-12-09T16:34:18,637 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; dfce6ce5b10c,38447,1733762004946 expired; onlineServers=0
2024-12-09T16:34:18,637 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'dfce6ce5b10c,36223,1733762004745' *****
2024-12-09T16:34:18,637 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T16:34:18,637 INFO [M:0;dfce6ce5b10c:36223 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T16:34:18,638 INFO [M:0;dfce6ce5b10c:36223 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:34:18,638 DEBUG [M:0;dfce6ce5b10c:36223 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T16:34:18,638 DEBUG [M:0;dfce6ce5b10c:36223 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T16:34:18,638 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733762006158 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733762006158,5,FailOnTimeoutGroup]
2024-12-09T16:34:18,638 INFO [M:0;dfce6ce5b10c:36223 {}] hbase.ChoreService(370): Chore service for: master/dfce6ce5b10c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T16:34:18,638 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733762006163 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733762006163,5,FailOnTimeoutGroup]
2024-12-09T16:34:18,638 INFO [M:0;dfce6ce5b10c:36223 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:34:18,638 DEBUG [M:0;dfce6ce5b10c:36223 {}] master.HMaster(1795): Stopping service threads
2024-12-09T16:34:18,638 INFO [M:0;dfce6ce5b10c:36223 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T16:34:18,638 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T16:34:18,638 INFO [M:0;dfce6ce5b10c:36223 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T16:34:18,639 INFO [M:0;dfce6ce5b10c:36223 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T16:34:18,640 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T16:34:18,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T16:34:18,654 DEBUG [M:0;dfce6ce5b10c:36223 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false
2024-12-09T16:34:18,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:34:18,654 DEBUG [M:0;dfce6ce5b10c:36223 {}] master.ActiveMasterManager(353): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master
2024-12-09T16:34:18,655 INFO [M:0;dfce6ce5b10c:36223 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/.lastflushedseqids
2024-12-09T16:34:18,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741849_1025 (size=130)
2024-12-09T16:34:18,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741849_1025 (size=130)
2024-12-09T16:34:18,675 INFO [M:0;dfce6ce5b10c:36223 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T16:34:18,675 INFO [M:0;dfce6ce5b10c:36223 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T16:34:18,675 DEBUG [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T16:34:18,675 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:34:18,675 DEBUG [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:34:18,675 DEBUG [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T16:34:18,675 DEBUG [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:34:18,676 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.56 KB heapSize=54.94 KB
2024-12-09T16:34:18,701 DEBUG [M:0;dfce6ce5b10c:36223 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d35c7b842fbf476993b56473f2ebf81e is 82, key is hbase:meta,,1/info:regioninfo/1733762007146/Put/seqid=0
2024-12-09T16:34:18,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741850_1026 (size=5672)
2024-12-09T16:34:18,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741850_1026 (size=5672)
2024-12-09T16:34:18,713 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d35c7b842fbf476993b56473f2ebf81e
2024-12-09T16:34:18,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:34:18,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38447-0x100bfc6b5b10001, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:34:18,729 INFO [RS:0;dfce6ce5b10c:38447 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:34:18,729 INFO [RS:0;dfce6ce5b10c:38447 {}] regionserver.HRegionServer(1031): Exiting; stopping=dfce6ce5b10c,38447,1733762004946; zookeeper connection closed.
2024-12-09T16:34:18,738 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1d7e6323 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1d7e6323
2024-12-09T16:34:18,739 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-09T16:34:18,744 DEBUG [M:0;dfce6ce5b10c:36223 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a3946ba535b40d98a741802a03da473 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733762007854/Put/seqid=0
2024-12-09T16:34:18,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741851_1027 (size=7819)
2024-12-09T16:34:18,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741851_1027 (size=7819)
2024-12-09T16:34:18,751 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a3946ba535b40d98a741802a03da473
2024-12-09T16:34:18,757 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9a3946ba535b40d98a741802a03da473
2024-12-09T16:34:18,779 DEBUG [M:0;dfce6ce5b10c:36223 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3c908ed004a74c4e8b4173e81a72e3d0 is 69, key is dfce6ce5b10c,38447,1733762004946/rs:state/1733762006209/Put/seqid=0
2024-12-09T16:34:18,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741852_1028 (size=5156)
2024-12-09T16:34:18,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741852_1028 (size=5156)
2024-12-09T16:34:18,840 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3c908ed004a74c4e8b4173e81a72e3d0
2024-12-09T16:34:18,906 DEBUG [M:0;dfce6ce5b10c:36223 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3d7d7af3df4841f485582cec86b8a83a is 52, key is load_balancer_on/state:d/1733762007374/Put/seqid=0
2024-12-09T16:34:18,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741853_1029 (size=5056)
2024-12-09T16:34:18,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741853_1029 (size=5056)
2024-12-09T16:34:18,920 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3d7d7af3df4841f485582cec86b8a83a
2024-12-09T16:34:18,927 DEBUG [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d35c7b842fbf476993b56473f2ebf81e as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d35c7b842fbf476993b56473f2ebf81e
2024-12-09T16:34:18,948 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d35c7b842fbf476993b56473f2ebf81e, entries=8, sequenceid=121, filesize=5.5 K
2024-12-09T16:34:18,950 DEBUG [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9a3946ba535b40d98a741802a03da473 as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9a3946ba535b40d98a741802a03da473
2024-12-09T16:34:18,962 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9a3946ba535b40d98a741802a03da473
2024-12-09T16:34:18,962 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9a3946ba535b40d98a741802a03da473, entries=14, sequenceid=121, filesize=7.6 K
2024-12-09T16:34:18,964 DEBUG [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3c908ed004a74c4e8b4173e81a72e3d0 as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3c908ed004a74c4e8b4173e81a72e3d0
2024-12-09T16:34:18,970 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3c908ed004a74c4e8b4173e81a72e3d0, entries=1, sequenceid=121, filesize=5.0 K
2024-12-09T16:34:18,973 DEBUG [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3d7d7af3df4841f485582cec86b8a83a as hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3d7d7af3df4841f485582cec86b8a83a
2024-12-09T16:34:18,982 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42743/user/jenkins/test-data/52f52881-e35f-c07a-3aa8-6ecd460029f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3d7d7af3df4841f485582cec86b8a83a, entries=1, sequenceid=121, filesize=4.9 K
2024-12-09T16:34:18,988 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.56 KB/44602, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 312ms, sequenceid=121, compaction requested=false
2024-12-09T16:34:18,991 INFO [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:34:18,991 DEBUG [M:0;dfce6ce5b10c:36223 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733762058675Disabling compacts and flushes for region at 1733762058675Disabling writes for close at 1733762058675Obtaining lock to block concurrent updates at 1733762058676 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733762058676Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44602, getHeapSize=56192, getOffHeapSize=0, getCellsCount=140 at 1733762058676Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733762058677 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733762058677Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733762058700 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733762058700Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733762058725 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733762058744 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733762058744Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733762058758 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733762058778 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733762058778Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733762058852 (+74 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733762058905 (+53 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733762058905Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26d57b36: reopening flushed file at 1733762058926 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@525b462a: reopening flushed file at 1733762058949 (+23 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a7a773d: reopening flushed file at 1733762058962 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6aa3c0a0: reopening flushed file at 1733762058971 (+9 ms)Finished flush of dataSize ~43.56 KB/44602, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 312ms, sequenceid=121, compaction requested=false at 1733762058988 (+17 ms)Writing region close event to WAL at 1733762058991 (+3 ms)Closed at 1733762058991
2024-12-09T16:34:18,992 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,992 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:18,993 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:19,006 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:34:19,007 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:34:19,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45183 is added to blk_1073741830_1006 (size=52999) 2024-12-09T16:34:19,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43725 is added to blk_1073741830_1006 (size=52999) 2024-12-09T16:34:19,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:19,414 INFO [M:0;dfce6ce5b10c:36223 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T16:34:19,414 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T16:34:19,414 INFO [M:0;dfce6ce5b10c:36223 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36223 2024-12-09T16:34:19,414 INFO [M:0;dfce6ce5b10c:36223 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T16:34:19,520 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:19,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T16:34:19,554 INFO [M:0;dfce6ce5b10c:36223 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T16:34:19,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36223-0x100bfc6b5b10000, quorum=127.0.0.1:60314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T16:34:19,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@570a7045{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:34:19,567 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36d948b4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T16:34:19,567 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T16:34:19,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c6bc9f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T16:34:19,568 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14a397e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/hadoop.log.dir/,STOPPED} 2024-12-09T16:34:19,570 WARN [BP-2412976-172.17.0.2-1733762002720 heartbeating to localhost/127.0.0.1:42743 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T16:34:19,570 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T16:34:19,570 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T16:34:19,570 WARN [BP-2412976-172.17.0.2-1733762002720 heartbeating to localhost/127.0.0.1:42743 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2412976-172.17.0.2-1733762002720 (Datanode Uuid e44e6410-c0d8-430f-bb34-915d73b562a1) service to localhost/127.0.0.1:42743 2024-12-09T16:34:19,570 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e/data/data3/current/BP-2412976-172.17.0.2-1733762002720 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:34:19,571 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e/data/data4/current/BP-2412976-172.17.0.2-1733762002720 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:34:19,571 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T16:34:19,573 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@31ad985a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:34:19,574 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37c97193{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T16:34:19,574 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T16:34:19,574 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@766bbc26{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T16:34:19,574 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dd2af12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/hadoop.log.dir/,STOPPED} 2024-12-09T16:34:19,579 WARN [BP-2412976-172.17.0.2-1733762002720 heartbeating to localhost/127.0.0.1:42743 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T16:34:19,579 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T16:34:19,579 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T16:34:19,579 WARN [BP-2412976-172.17.0.2-1733762002720 heartbeating to localhost/127.0.0.1:42743 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2412976-172.17.0.2-1733762002720 (Datanode Uuid 478ecbb2-78ef-4970-9929-344297393ed3) service to localhost/127.0.0.1:42743 2024-12-09T16:34:19,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:19,580 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e/data/data1/current/BP-2412976-172.17.0.2-1733762002720 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:34:19,580 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/cluster_ccb313ee-e923-ed98-ecc7-c0e6d98db88e/data/data2/current/BP-2412976-172.17.0.2-1733762002720 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:34:19,580 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T16:34:19,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e87766c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T16:34:19,587 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@594a0329{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T16:34:19,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T16:34:19,588 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e7fd6e5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T16:34:19,588 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d17bbc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/hadoop.log.dir/,STOPPED} 2024-12-09T16:34:19,595 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T16:34:19,621 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T16:34:19,632 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 184) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:42743 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42743 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42743 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42743 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42743 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42743 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) 
Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42743 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42743 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42743 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=524 (was 448) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6611 (was 4962) - AvailableMemoryMB LEAK? 
- 2024-12-09T16:34:19,638 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T16:34:19,638 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T16:34:19,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T16:34:19,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T16:34:19,643 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=524, ProcessCount=11, AvailableMemoryMB=6611 2024-12-09T16:34:19,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T16:34:19,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/hadoop.log.dir so I do NOT create it in target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4 2024-12-09T16:34:19,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/e1876b98-a84a-50f9-aee9-40a2702cdd9d/hadoop.tmp.dir so I do NOT create it in target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4 2024-12-09T16:34:19,643 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94, deleteOnExit=true 2024-12-09T16:34:19,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T16:34:19,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/test.cache.data in system properties and HBase conf 2024-12-09T16:34:19,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T16:34:19,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/hadoop.log.dir in system properties and HBase conf 2024-12-09T16:34:19,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T16:34:19,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T16:34:19,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T16:34:19,644 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T16:34:19,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T16:34:19,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T16:34:19,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T16:34:19,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T16:34:19,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T16:34:19,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T16:34:19,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T16:34:19,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T16:34:19,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T16:34:19,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/nfs.dump.dir in system properties and HBase conf 2024-12-09T16:34:19,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/java.io.tmpdir in system properties and HBase conf 2024-12-09T16:34:19,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T16:34:19,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T16:34:19,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T16:34:19,658 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T16:34:20,039 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:34:20,044 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:34:20,056 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:34:20,056 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:34:20,056 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T16:34:20,059 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:34:20,060 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37314cce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:34:20,060 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50c11e4d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:34:20,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:20,180 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e1d4a93{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/java.io.tmpdir/jetty-localhost-42573-hadoop-hdfs-3_4_1-tests_jar-_-any-905508293867603953/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T16:34:20,181 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b216a06{HTTP/1.1, (http/1.1)}{localhost:42573} 2024-12-09T16:34:20,181 INFO [Time-limited test {}] server.Server(415): Started @259347ms 2024-12-09T16:34:20,199 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T16:34:20,518 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:34:20,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:20,522 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:34:20,524 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:34:20,524 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:34:20,524 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T16:34:20,525 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10eef1f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:34:20,525 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45abd465{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:34:20,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:20,667 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a78718c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/java.io.tmpdir/jetty-localhost-39777-hadoop-hdfs-3_4_1-tests_jar-_-any-10706143334093951243/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:34:20,667 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@693d3430{HTTP/1.1, (http/1.1)}{localhost:39777} 2024-12-09T16:34:20,667 INFO [Time-limited test {}] server.Server(415): Started @259833ms 2024-12-09T16:34:20,668 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T16:34:20,766 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:34:20,770 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:34:20,775 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:34:20,775 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:34:20,775 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T16:34:20,778 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c6d9015{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:34:20,779 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cd15cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:34:20,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ef289a6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/java.io.tmpdir/jetty-localhost-37737-hadoop-hdfs-3_4_1-tests_jar-_-any-4529075058076267237/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:34:20,889 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a8a455f{HTTP/1.1, (http/1.1)}{localhost:37737} 2024-12-09T16:34:20,889 INFO [Time-limited test {}] server.Server(415): Started @260055ms 2024-12-09T16:34:20,890 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
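Editor's note: the WARN/stack-trace blocks above all share one failure shape. The Close-WAL-Writer-0 thread probes whether an old WAL file is already closed, the probe goes through java.lang.reflect.Method.invoke into RecoverLeaseFSUtils.isFileClosed, and it fails with an InvocationTargetException whose cause is "java.io.IOException: Filesystem closed" — the mini-cluster's DFS client has been shut down under it. The sketch below is a hedged reconstruction of that probe pattern, not the HBase source: it assumes the reflective lookup exists so the code still links against Hadoop builds that predate DistributedFileSystem#isFileClosed, and it treats a failed invocation as "not closed yet", which matches the "Failed invocation for ..." WARN text.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {
  // Resolve DistributedFileSystem#isFileClosed(Path) reflectively; on a
  // Hadoop build without the method the probe is simply unavailable.
  private static final Method IS_FILE_CLOSED = lookup();

  private static Method lookup() {
    try {
      return Class.forName("org.apache.hadoop.hdfs.DistributedFileSystem")
          .getMethod("isFileClosed", Path.class);
    } catch (ClassNotFoundException | NoSuchMethodException e) {
      return null;
    }
  }

  static boolean isFileClosed(FileSystem fs, Path p) {
    if (IS_FILE_CLOSED == null) {
      return false; // probe unsupported; caller falls back to lease recovery
    }
    try {
      return (Boolean) IS_FILE_CLOSED.invoke(fs, p);
    } catch (IllegalAccessException | InvocationTargetException e) {
      // Mirrors the WARN above: here the wrapped cause is the
      // "java.io.IOException: Filesystem closed" thrown by DFSClient.checkOpen.
      System.err.println("Failed invocation for " + p + ": " + e.getCause());
      return false;
    }
  }
}

Read this way, the repeated WARNs suggest shutdown noise — a recovery loop retrying against an already-closed client — rather than an actual data problem, though the log alone does not prove that.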
2024-12-09T16:34:21,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:21,521 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:21,545 WARN [Thread-1967 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94/data/data1/current/BP-1718649844-172.17.0.2-1733762059663/current, will proceed with Du for space computation calculation, 2024-12-09T16:34:21,545 WARN [Thread-1968 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94/data/data2/current/BP-1718649844-172.17.0.2-1733762059663/current, will proceed with Du for space computation calculation, 2024-12-09T16:34:21,575 WARN [Thread-1931 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T16:34:21,578 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f5b1c5a9279f59c with lease ID 0x61213d8873aa7d75: Processing first storage report for DS-963ac9fd-add2-49e7-bd28-72336cfde336 from datanode DatanodeRegistration(127.0.0.1:37067, datanodeUuid=6ddcb14b-9f49-457a-bd9a-f5a1245b178c, infoPort=44167, infoSecurePort=0, ipcPort=45571, storageInfo=lv=-57;cid=testClusterID;nsid=436962430;c=1733762059663) 2024-12-09T16:34:21,578 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f5b1c5a9279f59c with lease ID 0x61213d8873aa7d75: from storage DS-963ac9fd-add2-49e7-bd28-72336cfde336 node DatanodeRegistration(127.0.0.1:37067, datanodeUuid=6ddcb14b-9f49-457a-bd9a-f5a1245b178c, infoPort=44167, infoSecurePort=0, ipcPort=45571, storageInfo=lv=-57;cid=testClusterID;nsid=436962430;c=1733762059663), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:34:21,578 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f5b1c5a9279f59c with lease ID 0x61213d8873aa7d75: Processing first storage report for DS-9edd0e6b-81c7-4465-9b47-bee55b894927 from datanode DatanodeRegistration(127.0.0.1:37067, datanodeUuid=6ddcb14b-9f49-457a-bd9a-f5a1245b178c, infoPort=44167, infoSecurePort=0, ipcPort=45571, storageInfo=lv=-57;cid=testClusterID;nsid=436962430;c=1733762059663) 2024-12-09T16:34:21,578 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f5b1c5a9279f59c with lease ID 0x61213d8873aa7d75: from storage DS-9edd0e6b-81c7-4465-9b47-bee55b894927 node DatanodeRegistration(127.0.0.1:37067, datanodeUuid=6ddcb14b-9f49-457a-bd9a-f5a1245b178c, infoPort=44167, infoSecurePort=0, ipcPort=45571, storageInfo=lv=-57;cid=testClusterID;nsid=436962430;c=1733762059663), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:34:21,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:21,679 WARN [Thread-1978 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94/data/data3/current/BP-1718649844-172.17.0.2-1733762059663/current, will proceed with Du for space computation calculation, 2024-12-09T16:34:21,680 WARN [Thread-1979 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94/data/data4/current/BP-1718649844-172.17.0.2-1733762059663/current, will proceed with Du for space computation calculation, 2024-12-09T16:34:21,722 WARN [Thread-1954 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T16:34:21,726 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5946f86fd6dc72f with lease ID 0x61213d8873aa7d76: Processing first storage report for DS-096adc27-f52e-4c1f-b815-8f3cb153a78a from datanode DatanodeRegistration(127.0.0.1:38673, datanodeUuid=1f9d6c93-225b-4ed8-81d1-6a60f6a11598, infoPort=41691, infoSecurePort=0, ipcPort=39241, storageInfo=lv=-57;cid=testClusterID;nsid=436962430;c=1733762059663) 2024-12-09T16:34:21,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf5946f86fd6dc72f with lease ID 0x61213d8873aa7d76: from storage DS-096adc27-f52e-4c1f-b815-8f3cb153a78a node DatanodeRegistration(127.0.0.1:38673, datanodeUuid=1f9d6c93-225b-4ed8-81d1-6a60f6a11598, infoPort=41691, infoSecurePort=0, ipcPort=39241, storageInfo=lv=-57;cid=testClusterID;nsid=436962430;c=1733762059663), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:34:21,726 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5946f86fd6dc72f with lease ID 0x61213d8873aa7d76: Processing first storage report for DS-366dea68-d48c-4950-8b0d-f1dd994fc00c from datanode DatanodeRegistration(127.0.0.1:38673, datanodeUuid=1f9d6c93-225b-4ed8-81d1-6a60f6a11598, infoPort=41691, infoSecurePort=0, ipcPort=39241, storageInfo=lv=-57;cid=testClusterID;nsid=436962430;c=1733762059663) 2024-12-09T16:34:21,726 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf5946f86fd6dc72f with lease ID 0x61213d8873aa7d76: from storage DS-366dea68-d48c-4950-8b0d-f1dd994fc00c node DatanodeRegistration(127.0.0.1:38673, datanodeUuid=1f9d6c93-225b-4ed8-81d1-6a60f6a11598, infoPort=41691, infoSecurePort=0, ipcPort=39241, 
storageInfo=lv=-57;cid=testClusterID;nsid=436962430;c=1733762059663), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T16:34:21,771 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4 2024-12-09T16:34:21,789 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94/zookeeper_0, clientPort=63832, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T16:34:21,790 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63832 2024-12-09T16:34:21,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:34:21,793 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:34:21,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741825_1001 (size=7) 2024-12-09T16:34:21,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741825_1001 (size=7) 2024-12-09T16:34:21,824 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa with version=8 2024-12-09T16:34:21,824 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/hbase-staging 2024-12-09T16:34:21,826 INFO [Time-limited test {}] client.ConnectionUtils(128): master/dfce6ce5b10c:0 server-side Connection retries=45 2024-12-09T16:34:21,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:34:21,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T16:34:21,826 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T16:34:21,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:34:21,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T16:34:21,826 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T16:34:21,826 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T16:34:21,827 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45079 2024-12-09T16:34:21,829 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45079 connecting to ZooKeeper ensemble=127.0.0.1:63832 2024-12-09T16:34:21,882 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:450790x0, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T16:34:21,884 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45079-0x100bfc794870000 connected 2024-12-09T16:34:21,952 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:34:21,954 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:34:21,962 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:34:21,962 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa, hbase.cluster.distributed=false 2024-12-09T16:34:21,964 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T16:34:21,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45079 2024-12-09T16:34:21,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45079 2024-12-09T16:34:21,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45079 2024-12-09T16:34:21,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45079 2024-12-09T16:34:21,977 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45079 2024-12-09T16:34:22,000 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/dfce6ce5b10c:0 server-side Connection retries=45 2024-12-09T16:34:22,000 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:34:22,000 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T16:34:22,000 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T16:34:22,000 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T16:34:22,000 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T16:34:22,000 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T16:34:22,001 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T16:34:22,002 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39501 2024-12-09T16:34:22,004 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39501 connecting to ZooKeeper ensemble=127.0.0.1:63832 2024-12-09T16:34:22,005 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:34:22,006 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:34:22,019 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:395010x0, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T16:34:22,019 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:395010x0, quorum=127.0.0.1:63832, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:34:22,020 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T16:34:22,020 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39501-0x100bfc794870001 connected 2024-12-09T16:34:22,026 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T16:34:22,028 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T16:34:22,030 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T16:34:22,042 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, 
numCallQueues=1, port=39501 2024-12-09T16:34:22,042 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39501 2024-12-09T16:34:22,042 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39501 2024-12-09T16:34:22,043 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39501 2024-12-09T16:34:22,043 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39501 2024-12-09T16:34:22,059 DEBUG [M:0;dfce6ce5b10c:45079 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;dfce6ce5b10c:45079 2024-12-09T16:34:22,059 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/dfce6ce5b10c,45079,1733762061826 2024-12-09T16:34:22,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:34:22,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:34:22,069 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/dfce6ce5b10c,45079,1733762061826 2024-12-09T16:34:22,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:22,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T16:34:22,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:22,078 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T16:34:22,078 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/dfce6ce5b10c,45079,1733762061826 from backup master directory 2024-12-09T16:34:22,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/dfce6ce5b10c,45079,1733762061826 2024-12-09T16:34:22,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-12-09T16:34:22,085 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T16:34:22,085 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=dfce6ce5b10c,45079,1733762061826 2024-12-09T16:34:22,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T16:34:22,090 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/hbase.id] with ID: c3e9b90a-1f6e-4cbb-a5f2-1301c0de34bd 2024-12-09T16:34:22,090 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/.tmp/hbase.id 2024-12-09T16:34:22,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741826_1002 (size=42) 2024-12-09T16:34:22,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741826_1002 (size=42) 2024-12-09T16:34:22,100 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/.tmp/hbase.id]:[hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/hbase.id] 2024-12-09T16:34:22,117 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:34:22,117 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T16:34:22,119 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 2024-12-09T16:34:22,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:22,127 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:22,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:22,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741827_1003 (size=196) 2024-12-09T16:34:22,148 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T16:34:22,149 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, 
flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T16:34:22,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741827_1003 (size=196) 2024-12-09T16:34:22,149 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T16:34:22,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741828_1004 (size=1189) 2024-12-09T16:34:22,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741828_1004 (size=1189) 2024-12-09T16:34:22,162 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store 2024-12-09T16:34:22,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741829_1005 (size=34) 2024-12-09T16:34:22,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741829_1005 (size=34) 2024-12-09T16:34:22,180 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:34:22,181 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T16:34:22,181 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
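Editor's note: the MasterRegion lines above print the full schema of the master-local 'master:store' table — four families (info, proc, rs, state), with 'info' kept IN_MEMORY, ROWCOL bloom filters, ROW_INDEX_V1 block encoding, and 8 KB blocks. As a hedged sketch (the log does not show how MasterRegion assembles this internally), the same 'info' family can be expressed with the public HBase descriptor builders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

final class MasterStoreDescriptorSketch {
  // Rebuilds only the 'info' family from the attributes printed above:
  // VERSIONS => '3', IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL',
  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => 8 KB.
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build())
      .build();
  }
}

The small block size and in-memory flag fit a tiny, hot bookkeeping region that the master reads on every startup; the 64 KB defaults on proc/rs/state follow the stock family settings.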
2024-12-09T16:34:22,181 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:34:22,181 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T16:34:22,181 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:34:22,181 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:34:22,181 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733762062181Disabling compacts and flushes for region at 1733762062181Disabling writes for close at 1733762062181Writing region close event to WAL at 1733762062181Closed at 1733762062181 2024-12-09T16:34:22,182 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/.initializing 2024-12-09T16:34:22,182 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/WALs/dfce6ce5b10c,45079,1733762061826 2024-12-09T16:34:22,185 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C45079%2C1733762061826, suffix=, logDir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/WALs/dfce6ce5b10c,45079,1733762061826, archiveDir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/oldWALs, maxLogs=10 2024-12-09T16:34:22,186 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C45079%2C1733762061826.1733762062186 2024-12-09T16:34:22,193 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/WALs/dfce6ce5b10c,45079,1733762061826/dfce6ce5b10c%2C45079%2C1733762061826.1733762062186 2024-12-09T16:34:22,198 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44167:44167),(127.0.0.1/127.0.0.1:41691:41691)] 2024-12-09T16:34:22,200 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:34:22,201 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:34:22,201 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,201 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] 
regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,207 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T16:34:22,207 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:22,208 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:34:22,208 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T16:34:22,210 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:22,210 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:34:22,210 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,212 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T16:34:22,212 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:22,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:34:22,213 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,218 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T16:34:22,218 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:22,219 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:34:22,219 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,220 DEBUG 
[master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,221 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,223 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,223 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,223 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T16:34:22,225 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T16:34:22,227 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:34:22,229 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873201, jitterRate=0.11033345758914948}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T16:34:22,229 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733762062201Initializing all the Stores at 1733762062202 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762062202Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762062202Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762062202Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762062202Cleaning up temporary data from old regions at 1733762062223 (+21 ms)Region opened successfully at 1733762062229 (+6 ms) 2024-12-09T16:34:22,230 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T16:34:22,234 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37f1ed4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0 2024-12-09T16:34:22,235 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T16:34:22,235 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T16:34:22,235 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T16:34:22,235 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T16:34:22,236 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T16:34:22,236 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T16:34:22,236 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T16:34:22,239 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
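The FlushLargeStoresPolicy entry above, together with the MasterRegionFlusherAndCompactor constructor (flushSize=134217728), pins down how the logged 32.0 M figure is derived: with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, the per-family flush lower bound falls back to the region's memstore flush size divided by its family count. A minimal standalone sketch of that fallback (class and method names invented; only the formula and the numbers come from the log):

// Fallback computation behind the FlushLargeStoresPolicy log entry:
// lowerBound = memstoreFlushSize / familyCount when
// hbase.hregion.percolumnfamilyflush.size.lower.bound is unset.
public final class FlushLowerBoundSketch {
    static long lowerBound(long memstoreFlushSizeBytes, int familyCount) {
        return memstoreFlushSizeBytes / familyCount;
    }

    public static void main(String[] args) {
        // master:store above: flushSize=134217728 (128 MB) over 4 families
        // (info, proc, rs, state) -> 33554432 bytes, the logged 32.0 M.
        System.out.println(lowerBound(134_217_728L, 4)); // 33554432
    }
}

The same fallback reappears later for hbase:meta (again four families, 16.0 M), consistent with the FlushLargeStoresPolicy{flushSizeLowerBound=16777216} printed in that region's open entry.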
2024-12-09T16:34:22,240 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T16:34:22,249 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T16:34:22,250 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T16:34:22,251 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T16:34:22,260 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T16:34:22,260 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T16:34:22,262 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T16:34:22,268 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T16:34:22,269 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T16:34:22,277 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T16:34:22,280 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T16:34:22,285 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T16:34:22,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:34:22,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T16:34:22,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:22,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-09T16:34:22,294 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=dfce6ce5b10c,45079,1733762061826, sessionid=0x100bfc794870000, setting cluster-up flag (Was=false) 2024-12-09T16:34:22,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:22,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:22,335 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T16:34:22,336 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,45079,1733762061826 2024-12-09T16:34:22,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:22,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:22,393 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T16:34:22,395 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,45079,1733762061826 2024-12-09T16:34:22,397 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T16:34:22,399 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T16:34:22,399 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T16:34:22,399 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
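The StochasticLoadBalancer entry lists its cost functions and notes that the sum of their multipliers is 0.0 in this test configuration, so every candidate plan would score identically. The overall cost the balancer optimizes is a multiplier-weighted sum over those functions; a hedged sketch of that combination step (simplified shapes, not the real balancer API):

import java.util.List;
import java.util.function.DoubleSupplier;

// Weighted-sum combination described by the balancer config entry above:
// each cost function yields a normalized cost, scaled by its multiplier.
final class WeightedCostSketch {
    record Weighted(double multiplier, DoubleSupplier cost) {}

    static double totalCost(List<Weighted> functions) {
        double sum = 0.0;
        for (Weighted w : functions) {
            if (w.multiplier() > 0) { // zero-weight functions contribute nothing
                sum += w.multiplier() * w.cost().getAsDouble();
            }
        }
        return sum;
    }
}

With all multipliers at zero, as logged here, totalCost is 0.0 for any cluster layout, which is why the entry flags the sum explicitly.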
2024-12-09T16:34:22,400 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: dfce6ce5b10c,45079,1733762061826
Number of backup masters: 0
Number of live region servers: 0
Number of dead region servers: 0
Number of unknown region servers: 0
Average load: 0.0
Number of requests: 0
Number of regions: 0
Number of regions in transition: 0
2024-12-09T16:34:22,401 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:34:22,401 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:34:22,401 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:34:22,401 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5 2024-12-09T16:34:22,401 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/dfce6ce5b10c:0, corePoolSize=10, maxPoolSize=10 2024-12-09T16:34:22,401 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,401 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:34:22,401 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,408 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:34:22,409 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T16:34:22,410 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:22,410 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY =>
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T16:34:22,422 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733762092422 2024-12-09T16:34:22,422 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T16:34:22,422 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T16:34:22,422 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T16:34:22,423 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T16:34:22,423 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T16:34:22,423 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T16:34:22,423 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
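The CleanerChore entries above register a chain of log-cleaner delegates (TimeToLiveLogCleaner, ReplicationLogCleaner, the master-local-store and procedure-WAL TTL cleaners), and the hfile cleaner chain that follows is built the same way: a candidate file in oldWALs or the archive is removed only when no delegate objects. A schematic of that veto chain, with an invented delegate interface standing in for HBase's FileCleanerDelegate:

import java.util.List;

// Veto chain implied by the "Initialize cleaner=..." entries above:
// a file is deletable only if every registered delegate agrees.
interface CleanerDelegateSketch {
    boolean isFileDeletable(String file);
}

final class CleanerChainSketch {
    static boolean deletable(String file, List<CleanerDelegateSketch> delegates) {
        for (CleanerDelegateSketch d : delegates) {
            if (!d.isFileDeletable(file)) {
                return false; // one veto keeps the file (e.g. its TTL has not expired)
            }
        }
        return true;
    }
}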
2024-12-09T16:34:22,423 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T16:34:22,423 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T16:34:22,424 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T16:34:22,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741831_1007 (size=1321) 2024-12-09T16:34:22,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741831_1007 (size=1321) 2024-12-09T16:34:22,428 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T16:34:22,428 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa 2024-12-09T16:34:22,434 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T16:34:22,434 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T16:34:22,435 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733762062434,5,FailOnTimeoutGroup] 2024-12-09T16:34:22,435 DEBUG 
[master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733762062435,5,FailOnTimeoutGroup] 2024-12-09T16:34:22,435 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:22,435 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T16:34:22,435 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:22,435 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:22,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741832_1008 (size=32) 2024-12-09T16:34:22,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741832_1008 (size=32) 2024-12-09T16:34:22,449 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:34:22,456 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(746): ClusterId : c3e9b90a-1f6e-4cbb-a5f2-1301c0de34bd 2024-12-09T16:34:22,456 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T16:34:22,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T16:34:22,460 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T16:34:22,460 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:22,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:34:22,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T16:34:22,462 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T16:34:22,462 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:22,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:34:22,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T16:34:22,465 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T16:34:22,465 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:22,466 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:34:22,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T16:34:22,467 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T16:34:22,467 DEBUG [RS:0;dfce6ce5b10c:39501 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T16:34:22,469 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T16:34:22,469 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:22,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:34:22,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T16:34:22,472 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740 2024-12-09T16:34:22,473 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740 2024-12-09T16:34:22,475 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T16:34:22,475 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T16:34:22,476 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T16:34:22,477 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T16:34:22,478 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T16:34:22,479 DEBUG [RS:0;dfce6ce5b10c:39501 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@743de7aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0 2024-12-09T16:34:22,480 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:34:22,481 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=704107, jitterRate=-0.10468214750289917}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T16:34:22,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733762062450Initializing all the Stores at 1733762062450Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762062451 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762062458 (+7 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762062458Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762062458Cleaning up temporary data from old regions at 1733762062475 (+17 ms)Region opened successfully at 1733762062482 (+7 ms) 2024-12-09T16:34:22,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T16:34:22,482 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T16:34:22,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T16:34:22,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 
ms 2024-12-09T16:34:22,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T16:34:22,483 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T16:34:22,483 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733762062482Disabling compacts and flushes for region at 1733762062482Disabling writes for close at 1733762062482Writing region close event to WAL at 1733762062482Closed at 1733762062482 2024-12-09T16:34:22,498 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:34:22,498 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T16:34:22,498 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T16:34:22,500 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T16:34:22,501 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T16:34:22,504 DEBUG [RS:0;dfce6ce5b10c:39501 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;dfce6ce5b10c:39501 2024-12-09T16:34:22,504 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T16:34:22,504 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T16:34:22,504 DEBUG [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(832): About to register with Master. 
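Both "Opened ...; next sequenceid=2" entries print a ConstantSizeRegionSplitPolicy whose desiredMaxFileSize carries a random jitter, and the two logged pairs (873201 with jitterRate=0.11033345758914948 for the master store, 704107 with jitterRate=-0.10468214750289917 for meta) are both consistent with a base size of 786432 bytes plus a base*jitterRate adjustment truncated toward zero. A sketch of that reconstruction (the base value is inferred from these two entries, not read from configuration):

// Jitter application consistent with both "Opened ..." entries above:
// desiredMaxFileSize = base + (long)(base * jitterRate).
final class SplitJitterSketch {
    static long desiredMaxFileSize(long baseBytes, double jitterRate) {
        return baseBytes + (long) (baseBytes * jitterRate); // cast truncates toward zero
    }

    public static void main(String[] args) {
        long base = 786_432L; // implied by the two log entries; a test-config value
        System.out.println(desiredMaxFileSize(base, 0.11033345758914948));  // 873201
        System.out.println(desiredMaxFileSize(base, -0.10468214750289917)); // 704107
    }
}

The truncation matters: rounding instead of truncating would give 704106 for the negative jitter, not the 704107 the log records.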
2024-12-09T16:34:22,505 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(2659): reportForDuty to master=dfce6ce5b10c,45079,1733762061826 with port=39501, startcode=1733762062000 2024-12-09T16:34:22,506 DEBUG [RS:0;dfce6ce5b10c:39501 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T16:34:22,513 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36199, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T16:34:22,513 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45079 {}] master.ServerManager(363): Checking decommissioned status of RegionServer dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:22,513 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45079 {}] master.ServerManager(517): Registering regionserver=dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:22,516 DEBUG [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa 2024-12-09T16:34:22,516 DEBUG [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45027 2024-12-09T16:34:22,516 DEBUG [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T16:34:22,522 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:22,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T16:34:22,527 DEBUG [RS:0;dfce6ce5b10c:39501 {}] zookeeper.ZKUtil(111): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:22,528 WARN [RS:0;dfce6ce5b10c:39501 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T16:34:22,528 INFO [RS:0;dfce6ce5b10c:39501 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T16:34:22,528 DEBUG [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:22,530 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [dfce6ce5b10c,39501,1733762062000] 2024-12-09T16:34:22,554 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T16:34:22,557 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T16:34:22,557 INFO [RS:0;dfce6ce5b10c:39501 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T16:34:22,558 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:22,560 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T16:34:22,562 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T16:34:22,562 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
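The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit, matching HBase's default lower-limit fraction of 0.95 (hbase.regionserver.global.memstore.size.lower.limit). The derivation in miniature:

// Low-water mark derivation matching the MemStoreFlusher entry above.
final class MemStoreMarksSketch {
    static long lowMark(long globalLimitBytes, double lowerLimitFraction) {
        return (long) (globalLimitBytes * lowerLimitFraction);
    }

    public static void main(String[] args) {
        long limit = 880L * 1024 * 1024;                      // 880 M, as logged
        System.out.println(lowMark(limit, 0.95) / (1024 * 1024)); // 836
    }
}

Flushing starts when the regionserver's total memstore usage crosses the low mark and writes are blocked only at the hard limit, which is why the two values are logged side by side.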
2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:34:22,563 DEBUG [RS:0;dfce6ce5b10c:39501 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:34:22,566 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:22,566 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:22,566 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:22,566 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
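Each "Starting executor service" entry above creates a named pool whose corePoolSize equals its maxPoolSize, so every event type (RS_OPEN_REGION, RS_CLOSE_META, and so on) gets its own fixed-size worker pool. A generic sketch of such a named fixed pool (the names and timeout policy here are illustrative, not HBase's ExecutorService internals):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Fixed-size named pool in the spirit of the RS_* / MASTER_* executors above.
final class NamedPoolSketch {
    static ThreadPoolExecutor newPool(String name, int coreAndMax) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            coreAndMax, coreAndMax, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),                    // unbounded handoff queue
            r -> new Thread(r, name + "-" + r.hashCode())); // pool name in thread name
        pool.allowCoreThreadTimeOut(true); // optionally let idle workers exit
        return pool;
    }
}

Dedicated per-event-type pools keep a burst of one event kind (say, region opens) from starving handlers for another.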
2024-12-09T16:34:22,566 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:22,566 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39501,1733762062000-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:34:22,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:22,588 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T16:34:22,588 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39501,1733762062000-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:22,589 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:22,589 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.Replication(171): dfce6ce5b10c,39501,1733762062000 started 2024-12-09T16:34:22,606 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
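The two "Failed invocation" warnings come from RecoverLeaseFSUtils probing isFileClosed reflectively while closing WALs from an earlier test, with the underlying IOException("Filesystem closed") surfacing as an InvocationTargetException, exactly as the Method.invoke frames show. A simplified sketch of that reflective probe (the real utility caches the lookup and distinguishes more failure modes):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Reflective isFileClosed probe in the spirit of the warnings above: the
// method is looked up at runtime because not every FileSystem exposes it.
final class IsFileClosedSketch {
    static boolean isFileClosed(FileSystem fs, Path path) {
        try {
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
            return false; // method absent: fall back to plain lease recovery
        } catch (IllegalAccessException | InvocationTargetException e) {
            // e.g. a wrapped IOException("Filesystem closed"), as in the log;
            // treat the probe as inconclusive and keep recovering the lease
            return false;
        }
    }
}

That also explains why these are only warnings: a failed probe degrades to the slower recovery path instead of aborting the close.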
2024-12-09T16:34:22,606 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(1482): Serving as dfce6ce5b10c,39501,1733762062000, RpcServer on dfce6ce5b10c/172.17.0.2:39501, sessionid=0x100bfc794870001 2024-12-09T16:34:22,606 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T16:34:22,606 DEBUG [RS:0;dfce6ce5b10c:39501 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:22,606 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,39501,1733762062000' 2024-12-09T16:34:22,606 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T16:34:22,607 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T16:34:22,609 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T16:34:22,609 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T16:34:22,609 DEBUG [RS:0;dfce6ce5b10c:39501 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:22,609 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,39501,1733762062000' 2024-12-09T16:34:22,609 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T16:34:22,609 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T16:34:22,610 DEBUG [RS:0;dfce6ce5b10c:39501 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T16:34:22,610 INFO [RS:0;dfce6ce5b10c:39501 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T16:34:22,610 INFO [RS:0;dfce6ce5b10c:39501 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T16:34:22,652 WARN [dfce6ce5b10c:45079 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
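The WARN just above ("No servers available; cannot place 1 unassigned regions.") is transient: the assign queue is reprocessed once the region server finishes registering, visible at 16:34:22,902 when systemServersCount=1. A schematic of that park-and-retry behavior (invented names, not the AssignmentManager API):

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;

// Schematic of the assign-queue behavior around the WARN above: regions wait
// in the queue until at least one live server exists, then get placed.
final class AssignQueueSketch {
    final Queue<String> pending = new ArrayDeque<>();

    void processAssignQueue(List<String> liveServers) {
        if (liveServers.isEmpty()) {
            System.out.println("No servers available; cannot place "
                + pending.size() + " unassigned regions.");
            return; // retried on the next round, e.g. once a server registers
        }
        int i = 0;
        while (!pending.isEmpty()) {
            String region = pending.poll();
            String server = liveServers.get(i++ % liveServers.size());
            System.out.println("assign " + region + " -> " + server);
        }
    }
}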
2024-12-09T16:34:22,718 INFO [RS:0;dfce6ce5b10c:39501 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C39501%2C1733762062000, suffix=, logDir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000, archiveDir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/oldWALs, maxLogs=32 2024-12-09T16:34:22,719 INFO [RS:0;dfce6ce5b10c:39501 {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C39501%2C1733762062000.1733762062718 2024-12-09T16:34:22,725 INFO [RS:0;dfce6ce5b10c:39501 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000/dfce6ce5b10c%2C39501%2C1733762062000.1733762062718 2024-12-09T16:34:22,734 DEBUG [RS:0;dfce6ce5b10c:39501 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41691:41691),(127.0.0.1/127.0.0.1:44167:44167)] 2024-12-09T16:34:22,902 DEBUG [dfce6ce5b10c:45079 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T16:34:22,902 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:22,904 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,39501,1733762062000, state=OPENING 2024-12-09T16:34:22,941 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T16:34:22,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:22,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:34:22,952 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:34:22,952 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T16:34:22,953 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:34:22,953 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,39501,1733762062000}] 2024-12-09T16:34:23,106 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T16:34:23,108 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35315, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T16:34:23,111 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T16:34:23,111 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T16:34:23,113 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C39501%2C1733762062000.meta, suffix=.meta, logDir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000, archiveDir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/oldWALs, maxLogs=32 2024-12-09T16:34:23,113 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C39501%2C1733762062000.meta.1733762063113.meta 2024-12-09T16:34:23,121 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000/dfce6ce5b10c%2C39501%2C1733762062000.meta.1733762063113.meta 2024-12-09T16:34:23,123 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41691:41691),(127.0.0.1/127.0.0.1:44167:44167)] 2024-12-09T16:34:23,124 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:34:23,124 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T16:34:23,124 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T16:34:23,124 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
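The coprocessor entries above show hbase:meta's MultiRowMutationEndpoint being resolved by class name ("with path null", i.e. from the default classloader rather than a jar path) at priority 536870911 and then registered as a region service. The load-by-name step, schematically (a minimal sketch, not CoprocessorHost itself):

// Load-by-name step like the "Loading coprocessor class ... with path null
// and priority 536870911" entry: resolve the class and instantiate it.
final class CoprocessorLoadSketch {
    static Object load(String className) throws ReflectiveOperationException {
        Class<?> clazz = Class.forName(className); // path null -> default classloader
        return clazz.getDeclaredConstructor().newInstance();
    }
}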
2024-12-09T16:34:23,124 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T16:34:23,124 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:34:23,125 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T16:34:23,125 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T16:34:23,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T16:34:23,127 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T16:34:23,127 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:23,128 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:34:23,128 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T16:34:23,129 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T16:34:23,129 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:23,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:34:23,129 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T16:34:23,130 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T16:34:23,130 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:23,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:34:23,130 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T16:34:23,131 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T16:34:23,131 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:23,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
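Each of the four hbase:meta column families (info, ns, rep_barrier, table) gets its own store, and the CompactionConfiguration line printed per family is the rendered default compaction policy. A hedged sketch of the configuration keys behind those numbers, assuming the standard HBase names (values mirror the log; the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration compactionDefaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize:128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period: 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter 0.500000
    return conf;
  }
}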
2024-12-09T16:34:23,132 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T16:34:23,133 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740 2024-12-09T16:34:23,134 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740 2024-12-09T16:34:23,135 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T16:34:23,135 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T16:34:23,135 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T16:34:23,136 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T16:34:23,137 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=698100, jitterRate=-0.11232101917266846}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T16:34:23,137 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T16:34:23,138 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733762063125Writing region info on filesystem at 1733762063125Initializing all the Stores at 1733762063126 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762063126Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762063126Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762063126Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762063126Cleaning up temporary data from old regions at 1733762063135 (+9 ms)Running coprocessor post-open hooks at 1733762063137 (+2 ms)Region opened successfully at 1733762063138 (+1 ms) 2024-12-09T16:34:23,139 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733762063106 2024-12-09T16:34:23,141 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T16:34:23,141 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T16:34:23,142 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:23,143 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,39501,1733762062000, state=OPEN 2024-12-09T16:34:23,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:23,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:34:23,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:34:23,178 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:23,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:34:23,178 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:34:23,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T16:34:23,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,39501,1733762062000 in 225 msec 2024-12-09T16:34:23,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T16:34:23,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 684 msec 2024-12-09T16:34:23,187 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:34:23,187 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T16:34:23,189 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:34:23,189 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,39501,1733762062000, seqNum=-1] 2024-12-09T16:34:23,189 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:34:23,191 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41069, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:34:23,197 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 798 msec 2024-12-09T16:34:23,197 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to 
report in: status=status unset, state=RUNNING, startTime=1733762063197, completionTime=-1 2024-12-09T16:34:23,198 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T16:34:23,198 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T16:34:23,200 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T16:34:23,200 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733762123200 2024-12-09T16:34:23,200 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733762183200 2024-12-09T16:34:23,200 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T16:34:23,201 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45079,1733762061826-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:23,201 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45079,1733762061826-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:23,201 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45079,1733762061826-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:23,201 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-dfce6ce5b10c:45079, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:23,201 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:23,201 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:23,203 DEBUG [master/dfce6ce5b10c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T16:34:23,206 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.121sec 2024-12-09T16:34:23,206 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T16:34:23,206 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T16:34:23,207 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T16:34:23,207 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
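The burst of ChoreService lines above registers the master's periodic background tasks (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, and so on). Each is a ScheduledChore executed by a shared ChoreService, which is where the name/period/unit fields in those messages come from. A minimal sketch of that API with a hypothetical chore and a trivial Stoppable, just to illustrate the mechanism:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    // Trivial Stoppable: the chore stops being scheduled once isStopped() returns true.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    // Name and period mirror the "name=..., period=..., unit=MILLISECONDS" fields logged above.
    service.scheduleChore(new ScheduledChore("HeartbeatChore", stopper, 60_000) {
      @Override protected void chore() {
        System.out.println("chore fired"); // runs once per period
      }
    });
  }
}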
2024-12-09T16:34:23,207 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T16:34:23,207 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45079,1733762061826-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:34:23,207 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45079,1733762061826-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T16:34:23,209 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T16:34:23,209 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T16:34:23,209 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,45079,1733762061826-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:34:23,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5512995b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:34:23,257 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request dfce6ce5b10c,45079,-1 for getting cluster id 2024-12-09T16:34:23,257 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T16:34:23,262 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c3e9b90a-1f6e-4cbb-a5f2-1301c0de34bd' 2024-12-09T16:34:23,263 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T16:34:23,263 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c3e9b90a-1f6e-4cbb-a5f2-1301c0de34bd" 2024-12-09T16:34:23,263 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e859eb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:34:23,263 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [dfce6ce5b10c,45079,-1] 2024-12-09T16:34:23,264 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T16:34:23,264 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:34:23,265 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44280, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T16:34:23,267 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19f9f221, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T16:34:23,268 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:34:23,269 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,39501,1733762062000, seqNum=-1] 2024-12-09T16:34:23,270 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:34:23,271 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50968, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:34:23,273 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=dfce6ce5b10c,45079,1733762061826 2024-12-09T16:34:23,274 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T16:34:23,276 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T16:34:23,277 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T16:34:23,278 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is dfce6ce5b10c,45079,1733762061826 2024-12-09T16:34:23,278 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@71f5dc7b 2024-12-09T16:34:23,279 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T16:34:23,281 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44284, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T16:34:23,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45079 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T16:34:23,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45079 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
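The two TableDescriptorChecker warnings are expected here: the test deliberately sets a tiny max file size and memstore flush size so that flushes, rolls, and splits trigger quickly. A hedged sketch of a table descriptor that would trip both checks (table name, family name, and values taken from the log; the helper class is hypothetical), which corresponds to the create call logged next:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class TinyTableSketch {
  public static TableDescriptor tinyTable() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
        .setMaxFileSize(786432)       // trips the MAX_FILESIZE warning above
        .setMemStoreFlushSize(8192)   // trips the MEMSTORE_FLUSHSIZE warning above
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build();
  }
}

Passing the result to Admin.createTable(...) issues the CreateTableProcedure seen below.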
2024-12-09T16:34:23,282 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45079 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T16:34:23,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45079 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-09T16:34:23,288 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T16:34:23,288 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:23,294 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45079 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-09T16:34:23,295 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T16:34:23,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45079 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T16:34:23,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741835_1011 (size=381) 2024-12-09T16:34:23,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741835_1011 (size=381) 2024-12-09T16:34:23,317 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8e82fa94a813eccad63b97a5d7c1d816, NAME => 'TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa 2024-12-09T16:34:23,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741836_1012 (size=64) 2024-12-09T16:34:23,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741836_1012 (size=64) 2024-12-09T16:34:23,326 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:34:23,326 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 8e82fa94a813eccad63b97a5d7c1d816, disabling compactions & flushes 2024-12-09T16:34:23,326 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. 2024-12-09T16:34:23,326 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. 2024-12-09T16:34:23,326 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. after waiting 0 ms 2024-12-09T16:34:23,326 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. 2024-12-09T16:34:23,326 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. 2024-12-09T16:34:23,326 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8e82fa94a813eccad63b97a5d7c1d816: Waiting for close lock at 1733762063326Disabling compacts and flushes for region at 1733762063326Disabling writes for close at 1733762063326Writing region close event to WAL at 1733762063326Closed at 1733762063326 2024-12-09T16:34:23,328 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T16:34:23,329 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733762063328"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733762063328"}]},"ts":"1733762063328"} 2024-12-09T16:34:23,332 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
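At this point CREATE_TABLE_ADD_TO_META has written the new region's regioninfo and state columns into hbase:meta, as shown in the Put above. Those catalog rows are ordinary cells and can be inspected from any client; a minimal sketch, assuming a reachable cluster and default configuration (class name illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner rs = meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
      for (Result r : rs) {
        // Each region row carries info:regioninfo and info:state, as in the Put logged above.
        byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
        System.out.println(Bytes.toStringBinary(r.getRow())
            + " state=" + (state == null ? "?" : Bytes.toString(state)));
      }
    }
  }
}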
2024-12-09T16:34:23,333 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-12-09T16:34:23,334 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733762063334"}]},"ts":"1733762063334"}
2024-12-09T16:34:23,337 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta
2024-12-09T16:34:23,337 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e82fa94a813eccad63b97a5d7c1d816, ASSIGN}]
2024-12-09T16:34:23,339 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e82fa94a813eccad63b97a5d7c1d816, ASSIGN
2024-12-09T16:34:23,340 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e82fa94a813eccad63b97a5d7c1d816, ASSIGN; state=OFFLINE, location=dfce6ce5b10c,39501,1733762062000; forceNewPlan=false, retain=false
2024-12-09T16:34:23,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null [same WARN repeated 17 more times between 16:34:23,386 and 16:34:23,437]
2024-12-09T16:34:23,491 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8e82fa94a813eccad63b97a5d7c1d816, regionState=OPENING, regionLocation=dfce6ce5b10c,39501,1733762062000
2024-12-09T16:34:23,494 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e82fa94a813eccad63b97a5d7c1d816, ASSIGN because future has completed
2024-12-09T16:34:23,496 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000}]
2024-12-09T16:34:23,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:23,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:23,661 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. 2024-12-09T16:34:23,661 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8e82fa94a813eccad63b97a5d7c1d816, NAME => 'TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:34:23,661 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:23,661 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:34:23,662 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:23,662 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:23,665 INFO [StoreOpener-8e82fa94a813eccad63b97a5d7c1d816-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:23,668 INFO [StoreOpener-8e82fa94a813eccad63b97a5d7c1d816-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e82fa94a813eccad63b97a5d7c1d816 columnFamilyName info 2024-12-09T16:34:23,668 DEBUG [StoreOpener-8e82fa94a813eccad63b97a5d7c1d816-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:23,668 INFO 
[StoreOpener-8e82fa94a813eccad63b97a5d7c1d816-1 {}] regionserver.HStore(327): Store=8e82fa94a813eccad63b97a5d7c1d816/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:34:23,669 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:23,670 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:23,670 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:23,671 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:23,671 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:23,674 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:23,678 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:34:23,679 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8e82fa94a813eccad63b97a5d7c1d816; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=743205, jitterRate=-0.05496673285961151}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T16:34:23,679 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:23,681 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8e82fa94a813eccad63b97a5d7c1d816: Running coprocessor pre-open hook at 1733762063662Writing region info on filesystem at 1733762063662Initializing all the Stores at 1733762063663 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762063663Cleaning up temporary data from old regions at 1733762063671 (+8 ms)Running coprocessor post-open 
hooks at 1733762063679 (+8 ms)Region opened successfully at 1733762063681 (+2 ms) 2024-12-09T16:34:23,682 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., pid=6, masterSystemTime=1733762063649 2024-12-09T16:34:23,684 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. 2024-12-09T16:34:23,685 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. 2024-12-09T16:34:23,685 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8e82fa94a813eccad63b97a5d7c1d816, regionState=OPEN, openSeqNum=2, regionLocation=dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:23,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000 because future has completed 2024-12-09T16:34:23,693 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T16:34:23,693 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000 in 194 msec 2024-12-09T16:34:23,695 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T16:34:23,695 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e82fa94a813eccad63b97a5d7c1d816, ASSIGN in 356 msec 2024-12-09T16:34:23,697 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T16:34:23,697 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733762063697"}]},"ts":"1733762063697"} 2024-12-09T16:34:23,700 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-09T16:34:23,701 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T16:34:23,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 419 msec 2024-12-09T16:34:23,942 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T16:34:23,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric 
collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T16:34:23,967 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
2024-12-09T16:34:24,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:24,523 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:24,582 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:25,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:25,524 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:25,583 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:26,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:26,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:26,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:27,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:27,525 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:27,584 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:28,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:28,526 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:28,554 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-12-09T16:34:28,554 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling'
2024-12-09T16:34:28,585 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:29,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:29,164 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T16:34:29,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,166 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,206 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,207 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,208 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,208 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,215 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,216 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,216 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:29,527 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:29,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:29,638 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-09T16:34:29,638 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-12-09T16:34:29,639 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:34:29,639 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-12-09T16:34:29,640 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-12-09T16:34:29,640 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-12-09T16:34:29,640 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling
2024-12-09T16:34:29,640 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer
2024-12-09T16:34:30,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:30,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:30,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:31,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:34:31,528 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:34:31,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:32,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
[The identical InvocationTargetException / "java.io.IOException: Filesystem closed" stack shown once above accompanies each of these warnings; the Close-WAL-Writer-0 retry loop re-logs the same failure for the same three WAL files roughly once per second through 2024-12-09T16:34:33,155.]
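The warnings above come from WAL close probing whether HDFS has already closed the old writer's file: RecoverLeaseFSUtils reaches DistributedFileSystem.isFileClosed via reflection (hence the Method.invoke frames), so once the underlying DFSClient has been shut down, the "Filesystem closed" IOException surfaces wrapped in an InvocationTargetException on every retry. A minimal sketch of that probe pattern, assuming only the public Hadoop FileSystem API (an illustration, not the HBase source):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class IsFileClosedProbe {
      /**
       * Asks the filesystem whether the file's lease is already closed.
       * Returns false when the implementation lacks isFileClosed, or when
       * the call fails, e.g. with the wrapped "java.io.IOException:
       * Filesystem closed" seen in the log above; the caller then retries.
       */
      static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          // Looked up by name so the code also works against FileSystem
          // implementations that do not declare isFileClosed(Path).
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (NoSuchMethodException e) {
          return false; // probe unsupported by this FileSystem
        } catch (IllegalAccessException | InvocationTargetException e) {
          // The "Failed invocation for <path>" case logged above.
          return false;
        }
      }
    }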
2024-12-09T16:34:33,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45079 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-12-09T16:34:33,371 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed
2024-12-09T16:34:33,371 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling,, for max=2147483647 with caching=100
2024-12-09T16:34:33,375 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling
2024-12-09T16:34:33,375 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.
2024-12-09T16:34:33,379 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2]
2024-12-09T16:34:33,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on 8e82fa94a813eccad63b97a5d7c1d816
2024-12-09T16:34:33,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e82fa94a813eccad63b97a5d7c1d816 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T16:34:33,427 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/cd70ddf43d954649a7a8ee1602703abc is 1080, key is row0001/info:/1733762073382/Put/seqid=0
2024-12-09T16:34:33,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741837_1013 (size=12509)
2024-12-09T16:34:33,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741837_1013 (size=12509)
2024-12-09T16:34:33,471 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/cd70ddf43d954649a7a8ee1602703abc
2024-12-09T16:34:33,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-09T16:34:33,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/cd70ddf43d954649a7a8ee1602703abc as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/cd70ddf43d954649a7a8ee1602703abc
2024-12-09T16:34:33,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50968 deadline: 1733762083482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000
2024-12-09T16:34:33,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/cd70ddf43d954649a7a8ee1602703abc, entries=7, sequenceid=11, filesize=12.2 K
2024-12-09T16:34:33,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 8e82fa94a813eccad63b97a5d7c1d816 in 112ms, sequenceid=11, compaction requested=false
2024-12-09T16:34:33,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e82fa94a813eccad63b97a5d7c1d816:
2024-12-09T16:34:33,519 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2, the old value is region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000 [same server-side stack as above]
2024-12-09T16:34:33,520 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000 [same server-side stack as above]
2024-12-09T16:34:33,520 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2 because the exception is null or not the one we care about
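For context on the RegionTooBusyException above: HRegion.checkResources rejects the Mutate RPC while the region's memstore is over its blocking limit (32.0 K under this test's settings), the flush then drains the memstore, and the async client keeps its cached region location ("Will not update ... because the exception is null or not the one we care about") and simply retries the same server. A hedged sketch of that kind of admission check; the class and field names here are illustrative, not HBase API:

    import java.util.concurrent.atomic.AtomicLong;

    // Illustrative stand-in for the server-side check: writes are refused
    // with an exception (HBase uses RegionTooBusyException) while the
    // tracked memstore size is at or over the blocking limit, and completed
    // flushes bring the size back down so retried writes can proceed.
    final class MemstoreGuard {
      private final AtomicLong memstoreSizeBytes = new AtomicLong();
      private final long blockingLimitBytes; // e.g. 32 * 1024 in this test

      MemstoreGuard(long blockingLimitBytes) {
        this.blockingLimitBytes = blockingLimitBytes;
      }

      void onPut(long cellBytes) {
        if (memstoreSizeBytes.get() >= blockingLimitBytes) {
          throw new IllegalStateException(
              "Over memstore limit=" + blockingLimitBytes + " bytes");
        }
        memstoreSizeBytes.addAndGet(cellBytes);
      }

      void onFlushCompleted(long flushedBytes) {
        memstoreSizeBytes.addAndGet(-flushedBytes);
      }
    }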
[The Close-WAL-Writer-0 retry loop keeps re-logging the identical "Failed invocation ... java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warning for the same three WAL files (the dfce6ce5b10c,34107 WAL, the dfce6ce5b10c,41215 meta WAL, and the dfce6ce5b10c,39507 MasterData WAL) at roughly one-second intervals from 2024-12-09T16:34:33,530 through 2024-12-09T16:34:40,160.]
2024-12-09T16:34:40,537 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:40,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:41,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:41,538 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:41,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:42,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:42,539 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:42,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:43,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:43,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T16:34:43,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on 8e82fa94a813eccad63b97a5d7c1d816
2024-12-09T16:34:43,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e82fa94a813eccad63b97a5d7c1d816 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-09T16:34:43,570 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/ad0f44f75c8549b38347e126c2bb55d4 is 1080, key is row0008/info:/1733762073402/Put/seqid=0
2024-12-09T16:34:43,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741838_1014 (size=29761)
2024-12-09T16:34:43,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741838_1014 (size=29761)
2024-12-09T16:34:43,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/ad0f44f75c8549b38347e126c2bb55d4
2024-12-09T16:34:43,595 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:43,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/ad0f44f75c8549b38347e126c2bb55d4 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/ad0f44f75c8549b38347e126c2bb55d4
2024-12-09T16:34:43,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/ad0f44f75c8549b38347e126c2bb55d4, entries=23, sequenceid=37, filesize=29.1 K
2024-12-09T16:34:43,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 8e82fa94a813eccad63b97a5d7c1d816 in 46ms, sequenceid=37, compaction requested=false
2024-12-09T16:34:43,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e82fa94a813eccad63b97a5d7c1d816:
2024-12-09T16:34:43,610 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K
2024-12-09T16:34:43,610 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:34:43,610 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/ad0f44f75c8549b38347e126c2bb55d4 because midkey is the same as first or last row
2024-12-09T16:34:44,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:44,540 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:44,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:45,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:45,541 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:45,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on 8e82fa94a813eccad63b97a5d7c1d816
2024-12-09T16:34:45,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e82fa94a813eccad63b97a5d7c1d816 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T16:34:45,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/e5cb047e1457471880ecfd0dc7e5e47b is 1080, key is row0031/info:/1733762083565/Put/seqid=0
2024-12-09T16:34:45,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
11 more 2024-12-09T16:34:45,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741839_1015 (size=12509) 2024-12-09T16:34:45,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741839_1015 (size=12509) 2024-12-09T16:34:45,606 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/e5cb047e1457471880ecfd0dc7e5e47b 2024-12-09T16:34:45,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/e5cb047e1457471880ecfd0dc7e5e47b as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/e5cb047e1457471880ecfd0dc7e5e47b 2024-12-09T16:34:45,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/e5cb047e1457471880ecfd0dc7e5e47b, entries=7, sequenceid=47, filesize=12.2 K 2024-12-09T16:34:45,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 8e82fa94a813eccad63b97a5d7c1d816 in 42ms, sequenceid=47, compaction requested=true 2024-12-09T16:34:45,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e82fa94a813eccad63b97a5d7c1d816: 2024-12-09T16:34:45,624 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-12-09T16:34:45,624 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T16:34:45,624 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/ad0f44f75c8549b38347e126c2bb55d4 because midkey is the same as first or last row 2024-12-09T16:34:45,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:45,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e82fa94a813eccad63b97a5d7c1d816:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T16:34:45,631 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T16:34:45,631 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T16:34:45,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 
8e82fa94a813eccad63b97a5d7c1d816 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-12-09T16:34:45,632 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T16:34:45,633 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1541): 8e82fa94a813eccad63b97a5d7c1d816/info is initiating minor compaction (all files) 2024-12-09T16:34:45,633 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8e82fa94a813eccad63b97a5d7c1d816/info in TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. 2024-12-09T16:34:45,633 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/cd70ddf43d954649a7a8ee1602703abc, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/ad0f44f75c8549b38347e126c2bb55d4, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/e5cb047e1457471880ecfd0dc7e5e47b] into tmpdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp, totalSize=53.5 K 2024-12-09T16:34:45,638 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd70ddf43d954649a7a8ee1602703abc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733762073382 2024-12-09T16:34:45,639 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting ad0f44f75c8549b38347e126c2bb55d4, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733762073402 2024-12-09T16:34:45,640 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting e5cb047e1457471880ecfd0dc7e5e47b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733762083565 2024-12-09T16:34:45,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/b5b81380150944869909cfe763fa088b is 1080, key is row0038/info:/1733762085582/Put/seqid=0 2024-12-09T16:34:45,680 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e82fa94a813eccad63b97a5d7c1d816#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 
2024-12-09T16:34:45,681 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/0f2ffd676b774e9fab1174da033bd70c is 1080, key is row0001/info:/1733762073382/Put/seqid=0
2024-12-09T16:34:45,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741840_1016 (size=23299)
2024-12-09T16:34:45,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741840_1016 (size=23299)
2024-12-09T16:34:45,703 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=67 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/b5b81380150944869909cfe763fa088b
2024-12-09T16:34:45,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/b5b81380150944869909cfe763fa088b as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/b5b81380150944869909cfe763fa088b
2024-12-09T16:34:45,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/b5b81380150944869909cfe763fa088b, entries=17, sequenceid=67, filesize=22.8 K
2024-12-09T16:34:45,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=10.51 KB/10760 for 8e82fa94a813eccad63b97a5d7c1d816 in 91ms, sequenceid=67, compaction requested=false
2024-12-09T16:34:45,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e82fa94a813eccad63b97a5d7c1d816:
2024-12-09T16:34:45,722 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=76.2 K, sizeToCheck=16.0 K
2024-12-09T16:34:45,723 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:34:45,723 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/ad0f44f75c8549b38347e126c2bb55d4 because midkey is the same as first or last row
2024-12-09T16:34:45,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741841_1017 (size=44978)
2024-12-09T16:34:45,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741841_1017 (size=44978)
2024-12-09T16:34:46,146 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/0f2ffd676b774e9fab1174da033bd70c as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0f2ffd676b774e9fab1174da033bd70c
2024-12-09T16:34:46,162 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8e82fa94a813eccad63b97a5d7c1d816/info of 8e82fa94a813eccad63b97a5d7c1d816 into 0f2ffd676b774e9fab1174da033bd70c(size=43.9 K), total size for store is 66.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T16:34:46,162 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8e82fa94a813eccad63b97a5d7c1d816:
2024-12-09T16:34:46,162 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., storeName=8e82fa94a813eccad63b97a5d7c1d816/info, priority=13, startTime=1733762085624; duration=0sec
2024-12-09T16:34:46,162 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K
2024-12-09T16:34:46,162 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:34:46,163 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0f2ffd676b774e9fab1174da033bd70c because midkey is the same as first or last row
2024-12-09T16:34:46,163 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K
2024-12-09T16:34:46,163 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:34:46,163 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0f2ffd676b774e9fab1174da033bd70c because midkey is the same as first or last row
2024-12-09T16:34:46,163 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K
2024-12-09T16:34:46,163 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:34:46,163 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0f2ffd676b774e9fab1174da033bd70c because midkey is the same as first or last row
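NOTE: Two policy checks repeat after every flush and compaction here. The size check passes (sumSize exceeds sizeToCheck), but StoreUtils vetoes the split because the store file's midkey equals its first or last row, so no split point would produce two non-empty daughters. The 16.0 K threshold is consistent with the increasing-to-upper-bound policy as documented: roughly the cube of the region count for this table on the server times twice the memstore flush size, capped at the max file size. A hedged back-of-the-envelope sketch; the 8 KiB flush size is an assumed test value chosen to reproduce the logged figure, not read from the test config:

    public class SplitThresholdSketch {
        // min(maxFileSize, regionCount^3 * 2 * flushSize), per the documented
        // IncreasingToUpperBoundRegionSplitPolicy behavior.
        static long sizeToCheck(int regionsWithCommonTable, long flushSize, long maxFileSize) {
            long initialSize = 2L * flushSize;
            long cubed = (long) regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
            return Math.min(maxFileSize, initialSize * cubed);
        }

        public static void main(String[] args) {
            // One region, 8 KiB flush size -> 16384 bytes, matching sizeToCheck=16.0 K above.
            System.out.println(sizeToCheck(1, 8 * 1024, 10L << 30));
        }
    }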
2024-12-09T16:34:46,163 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:34:46,163 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e82fa94a813eccad63b97a5d7c1d816:info
2024-12-09T16:34:46,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:46,542 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:46,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:47,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:47,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:47,598 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
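NOTE: These repeating WARNs are benign in this test: a Close-WAL-Writer worker keeps polling lease recovery for old WALs on a mini-cluster whose DFSClient has already been shut down, so the reflective isFileClosed probe fails with "Filesystem closed" each time. Stripped of the reflection (which HBase uses for compatibility with older HDFS clients), the polling loop looks roughly like this; the path and sleep interval are placeholders, not values from the test:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
        static void recover(DistributedFileSystem dfs, Path wal) throws Exception {
            // Ask the NameNode to start lease recovery; true means the file
            // is already closed and nothing more is needed.
            if (dfs.recoverLease(wal)) {
                return;
            }
            // Otherwise poll until block recovery completes. If the client is
            // shut down meanwhile, isFileClosed throws
            // "java.io.IOException: Filesystem closed", as in the WARNs above.
            while (!dfs.isFileClosed(wal)) {
                Thread.sleep(1000L);
            }
        }
    }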
2024-12-09T16:34:47,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on 8e82fa94a813eccad63b97a5d7c1d816
2024-12-09T16:34:47,673 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8e82fa94a813eccad63b97a5d7c1d816 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-12-09T16:34:47,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/0c9d362e02c34c36a9cb0e7356517356 is 1080, key is row0055/info:/1733762085632/Put/seqid=0
2024-12-09T16:34:47,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741842_1018 (size=16817)
2024-12-09T16:34:47,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741842_1018 (size=16817)
2024-12-09T16:34:47,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-09T16:34:47,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50968 deadline: 1733762097769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000
2024-12-09T16:34:47,771 DEBUG [Time-limited test {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:34:47,771 DEBUG [Time-limited test {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:34:47,771 DEBUG [Time-limited test {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2 because the exception is null or not the one we care about
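NOTE: The Mutate above is rejected server-side because the region's memstore is over its blocking limit (32.0 K in this test) while the flush is still draining, and the client-side locator then correctly declines to invalidate the cached region location, since RegionTooBusyException does not mean the region moved. A caller handling this by hand might back off and retry, roughly as below; the table and row come from the log, but the loop itself is an illustrative sketch, and the HBase client normally retries such calls internally (and may surface the failure wrapped in a retries-exhausted exception rather than raw):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRetrySketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
                // The test writes ~1 KB cells under family "info" with an empty qualifier.
                Put put = new Put(Bytes.toBytes("row0055"))
                    .addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), new byte[1024]);
                long backoffMs = 100;
                while (true) {
                    try {
                        table.put(put);
                        break;
                    } catch (RegionTooBusyException e) {
                        // Memstore over its blocking limit; give the flush a
                        // moment to drain before retrying.
                        Thread.sleep(backoffMs);
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
            }
        }
    }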
2024-12-09T16:34:48,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/0c9d362e02c34c36a9cb0e7356517356
2024-12-09T16:34:48,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/0c9d362e02c34c36a9cb0e7356517356 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0c9d362e02c34c36a9cb0e7356517356
2024-12-09T16:34:48,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:48,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0c9d362e02c34c36a9cb0e7356517356, entries=11, sequenceid=82, filesize=16.4 K
2024-12-09T16:34:48,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=18.91 KB/19368 for 8e82fa94a813eccad63b97a5d7c1d816 in 496ms, sequenceid=82, compaction requested=true
2024-12-09T16:34:48,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8e82fa94a813eccad63b97a5d7c1d816:
2024-12-09T16:34:48,169 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K
2024-12-09T16:34:48,170 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:34:48,170 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0f2ffd676b774e9fab1174da033bd70c because midkey is the same as first or last row
2024-12-09T16:34:48,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8e82fa94a813eccad63b97a5d7c1d816:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T16:34:48,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-09T16:34:48,170 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T16:34:48,171 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85094 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T16:34:48,171 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1541): 8e82fa94a813eccad63b97a5d7c1d816/info is initiating minor compaction (all files)
2024-12-09T16:34:48,171 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8e82fa94a813eccad63b97a5d7c1d816/info in TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.
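NOTE: Observe the feedback loop here: the flush finishes with "compaction requested=true" because the store is back up to three files, which matches the default minimum for a minor compaction, so the flusher immediately enqueues the selection that follows. Schematically (the threshold name mirrors hbase.hstore.compactionThreshold; this is a toy predicate, not the HBase code):

    public class NeedsCompactionSketch {
        // After the flush: 3 store files, 0 compacting, default threshold 3.
        static boolean needsCompaction(int storefiles, int compacting, int compactionThreshold) {
            return (storefiles - compacting) >= compactionThreshold;
        }

        public static void main(String[] args) {
            System.out.println(needsCompaction(3, 0, 3)); // true -> "Small Compaction requested"
        }
    }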
2024-12-09T16:34:48,171 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0f2ffd676b774e9fab1174da033bd70c, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/b5b81380150944869909cfe763fa088b, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0c9d362e02c34c36a9cb0e7356517356] into tmpdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp, totalSize=83.1 K
2024-12-09T16:34:48,172 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0f2ffd676b774e9fab1174da033bd70c, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733762073382
2024-12-09T16:34:48,172 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting b5b81380150944869909cfe763fa088b, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=67, earliestPutTs=1733762085582
2024-12-09T16:34:48,173 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0c9d362e02c34c36a9cb0e7356517356, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733762085632
2024-12-09T16:34:48,245 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8e82fa94a813eccad63b97a5d7c1d816#info#compaction#61 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T16:34:48,246 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/76b73bfe42ed42db80e015aaecdea91c is 1080, key is row0001/info:/1733762073382/Put/seqid=0
2024-12-09T16:34:48,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741843_1019 (size=75378)
2024-12-09T16:34:48,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741843_1019 (size=75378)
2024-12-09T16:34:48,268 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/76b73bfe42ed42db80e015aaecdea91c as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76b73bfe42ed42db80e015aaecdea91c
2024-12-09T16:34:48,276 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8e82fa94a813eccad63b97a5d7c1d816/info of 8e82fa94a813eccad63b97a5d7c1d816 into 76b73bfe42ed42db80e015aaecdea91c(size=73.6 K), total size for store is 73.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T16:34:48,276 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8e82fa94a813eccad63b97a5d7c1d816:
2024-12-09T16:34:48,276 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., storeName=8e82fa94a813eccad63b97a5d7c1d816/info, priority=13, startTime=1733762088170; duration=0sec
2024-12-09T16:34:48,276 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K
2024-12-09T16:34:48,276 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:34:48,276 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K
2024-12-09T16:34:48,276 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:34:48,277 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.6 K, sizeToCheck=16.0 K
2024-12-09T16:34:48,277 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-12-09T16:34:48,278 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
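NOTE: The PressureAwareThroughputController entries record that both compactions ran under the 50.00 MB/second cap without ever sleeping. The mechanism is essentially a budget check after each written chunk, along these lines (field and method names are illustrative only, not the HBase implementation):

    public class ThroughputLimiterSketch {
        private final double bytesPerMs;
        private final long start = System.currentTimeMillis();
        private long written;

        ThroughputLimiterSketch(double bytesPerSecond) {
            this.bytesPerMs = bytesPerSecond / 1000.0;
        }

        // Called by the compaction writer after each chunk; sleeps whenever
        // the writer runs ahead of the configured byte budget.
        void control(long deltaBytes) throws InterruptedException {
            written += deltaBytes;
            long earned = (long) ((System.currentTimeMillis() - start) * bytesPerMs);
            if (written > earned) {
                Thread.sleep((long) ((written - earned) / bytesPerMs));
            }
        }
    }

At the observed 33.35 MB/second average against a 50 MB/second limit, the writer never exceeds its budget, matching "slept 0 time(s)" above.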
2024-12-09T16:34:48,278 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:34:48,278 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8e82fa94a813eccad63b97a5d7c1d816:info
2024-12-09T16:34:48,279 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45079 {}] assignment.AssignmentManager(1363): Split request from dfce6ce5b10c,39501,1733762062000, parent={ENCODED => 8e82fa94a813eccad63b97a5d7c1d816, NAME => 'TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062
2024-12-09T16:34:48,285 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45079 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=dfce6ce5b10c,39501,1733762062000
2024-12-09T16:34:48,289 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45079 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=8e82fa94a813eccad63b97a5d7c1d816, daughterA=6b0538698171d50c35c5f6eb07bf4825, daughterB=b21241e15084e886f8038211bba8302b
2024-12-09T16:34:48,294 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=8e82fa94a813eccad63b97a5d7c1d816, daughterA=6b0538698171d50c35c5f6eb07bf4825, daughterB=b21241e15084e886f8038211bba8302b
2024-12-09T16:34:48,294 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=8e82fa94a813eccad63b97a5d7c1d816, daughterA=6b0538698171d50c35c5f6eb07bf4825, daughterB=b21241e15084e886f8038211bba8302b
2024-12-09T16:34:48,294 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=8e82fa94a813eccad63b97a5d7c1d816, daughterA=6b0538698171d50c35c5f6eb07bf4825, daughterB=b21241e15084e886f8038211bba8302b
2024-12-09T16:34:48,305 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e82fa94a813eccad63b97a5d7c1d816, UNASSIGN}]
2024-12-09T16:34:48,307 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e82fa94a813eccad63b97a5d7c1d816, UNASSIGN
2024-12-09T16:34:48,310 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=8e82fa94a813eccad63b97a5d7c1d816, regionState=CLOSING, regionLocation=dfce6ce5b10c,39501,1733762062000
2024-12-09T16:34:48,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e82fa94a813eccad63b97a5d7c1d816, UNASSIGN because future has completed
2024-12-09T16:34:48,315 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false
2024-12-09T16:34:48,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000}]
2024-12-09T16:34:48,478 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 8e82fa94a813eccad63b97a5d7c1d816
2024-12-09T16:34:48,478 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true
2024-12-09T16:34:48,479 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 8e82fa94a813eccad63b97a5d7c1d816, disabling compactions & flushes
2024-12-09T16:34:48,479 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.
2024-12-09T16:34:48,479 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.
2024-12-09T16:34:48,479 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. after waiting 0 ms
2024-12-09T16:34:48,479 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.
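NOTE: The close sequence above has the standard shape: take the close lock, disable further updates, then flush what remains of the memstore so the daughter regions start from durable files. In simplified Java (the locking scheme is reduced to a single write lock; the real region tracks considerably more state):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class RegionCloseSketch {
        private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
        private volatile boolean writesDisabled;

        void close(Runnable finalFlush) {
            closeLock.writeLock().lock();   // "Acquired close lock ... after waiting 0 ms"
            try {
                writesDisabled = true;      // "Updates disabled for region ..."
                finalFlush.run();           // the final flush of 18.91 KB that follows
            } finally {
                closeLock.writeLock().unlock();
            }
        }
    }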
2024-12-09T16:34:48,479 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 8e82fa94a813eccad63b97a5d7c1d816 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB
2024-12-09T16:34:48,490 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/76cecf5450df43c1abac9c3037e45e4a is 1080, key is row0066/info:/1733762087675/Put/seqid=0
2024-12-09T16:34:48,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741844_1020 (size=24376)
2024-12-09T16:34:48,524 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=104 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/76cecf5450df43c1abac9c3037e45e4a
2024-12-09T16:34:48,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741844_1020 (size=24376)
2024-12-09T16:34:48,533 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/.tmp/info/76cecf5450df43c1abac9c3037e45e4a as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76cecf5450df43c1abac9c3037e45e4a
2024-12-09T16:34:48,542 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76cecf5450df43c1abac9c3037e45e4a, entries=18, sequenceid=104, filesize=23.8 K
2024-12-09T16:34:48,543 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:48,545 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=0 B/0 for 8e82fa94a813eccad63b97a5d7c1d816 in 66ms, sequenceid=104, compaction requested=false
2024-12-09T16:34:48,546 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/cd70ddf43d954649a7a8ee1602703abc, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/ad0f44f75c8549b38347e126c2bb55d4, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0f2ffd676b774e9fab1174da033bd70c, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/e5cb047e1457471880ecfd0dc7e5e47b, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/b5b81380150944869909cfe763fa088b, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0c9d362e02c34c36a9cb0e7356517356] to archive
2024-12-09T16:34:48,547 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-12-09T16:34:48,549 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/cd70ddf43d954649a7a8ee1602703abc to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/cd70ddf43d954649a7a8ee1602703abc
2024-12-09T16:34:48,551 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/ad0f44f75c8549b38347e126c2bb55d4 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/ad0f44f75c8549b38347e126c2bb55d4
2024-12-09T16:34:48,552 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0f2ffd676b774e9fab1174da033bd70c to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0f2ffd676b774e9fab1174da033bd70c
2024-12-09T16:34:48,554 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/e5cb047e1457471880ecfd0dc7e5e47b to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/e5cb047e1457471880ecfd0dc7e5e47b
2024-12-09T16:34:48,556 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/b5b81380150944869909cfe763fa088b to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/b5b81380150944869909cfe763fa088b
2024-12-09T16:34:48,557 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0c9d362e02c34c36a9cb0e7356517356 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/0c9d362e02c34c36a9cb0e7356517356
2024-12-09T16:34:48,570 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/recovered.edits/107.seqid, newMaxSeqId=107, maxSeqId=1
2024-12-09T16:34:48,571 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.
2024-12-09T16:34:48,571 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 8e82fa94a813eccad63b97a5d7c1d816: Waiting for close lock at 1733762088479Running coprocessor pre-close hooks at 1733762088479Disabling compacts and flushes for region at 1733762088479Disabling writes for close at 1733762088479Obtaining lock to block concurrent updates at 1733762088479Preparing flush snapshotting stores in 8e82fa94a813eccad63b97a5d7c1d816 at 1733762088479Finished memstore snapshotting TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., syncing WAL and waiting on mvcc, flushsize=dataSize=19368, getHeapSize=20976, getOffHeapSize=0, getCellsCount=18 at 1733762088480 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. at 1733762088480Flushing 8e82fa94a813eccad63b97a5d7c1d816/info: creating writer at 1733762088481 (+1 ms)Flushing 8e82fa94a813eccad63b97a5d7c1d816/info: appending metadata at 1733762088490 (+9 ms)Flushing 8e82fa94a813eccad63b97a5d7c1d816/info: closing flushed file at 1733762088490Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10c79fc5: reopening flushed file at 1733762088532 (+42 ms)Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=0 B/0 for 8e82fa94a813eccad63b97a5d7c1d816 in 66ms, sequenceid=104, compaction requested=false at 1733762088545 (+13 ms)Writing region close event to WAL at 1733762088562 (+17 ms)Running coprocessor post-close hooks at 1733762088571 (+9 ms)Closed at 1733762088571
2024-12-09T16:34:48,573 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 8e82fa94a813eccad63b97a5d7c1d816
2024-12-09T16:34:48,574 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=8e82fa94a813eccad63b97a5d7c1d816, regionState=CLOSED
2024-12-09T16:34:48,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000 because future has completed
2024-12-09T16:34:48,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8
2024-12-09T16:34:48,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 8e82fa94a813eccad63b97a5d7c1d816, server=dfce6ce5b10c,39501,1733762062000 in 264 msec
2024-12-09T16:34:48,584 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-12-09T16:34:48,584 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=8e82fa94a813eccad63b97a5d7c1d816, UNASSIGN in 276 msec
2024-12-09T16:34:48,601 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:34:48,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
11 more 2024-12-09T16:34:48,607 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 2 storefiles, region=8e82fa94a813eccad63b97a5d7c1d816, threads=2 2024-12-09T16:34:48,611 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76b73bfe42ed42db80e015aaecdea91c for region: 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:48,611 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76cecf5450df43c1abac9c3037e45e4a for region: 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:48,630 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76cecf5450df43c1abac9c3037e45e4a, top=true 2024-12-09T16:34:48,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741845_1021 (size=27) 2024-12-09T16:34:48,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741845_1021 (size=27) 2024-12-09T16:34:48,649 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/TestLogRolling-testLogRolling=8e82fa94a813eccad63b97a5d7c1d816-76cecf5450df43c1abac9c3037e45e4a for child: b21241e15084e886f8038211bba8302b, parent: 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:48,649 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76cecf5450df43c1abac9c3037e45e4a for region: 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:49,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741846_1022 (size=27) 2024-12-09T16:34:49,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741846_1022 (size=27) 2024-12-09T16:34:49,066 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76b73bfe42ed42db80e015aaecdea91c for region: 8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:34:49,069 DEBUG [PEWorker-1 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 8e82fa94a813eccad63b97a5d7c1d816 Daughter A: 
[hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816] storefiles, Daughter B: [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/TestLogRolling-testLogRolling=8e82fa94a813eccad63b97a5d7c1d816-76cecf5450df43c1abac9c3037e45e4a] storefiles. 2024-12-09T16:34:49,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741847_1023 (size=71) 2024-12-09T16:34:49,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741847_1023 (size=71) 2024-12-09T16:34:49,091 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:49,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741848_1024 (size=71) 2024-12-09T16:34:49,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741848_1024 (size=71) 2024-12-09T16:34:49,152 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:49,166 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/recovered.edits/107.seqid, newMaxSeqId=107, maxSeqId=-1 2024-12-09T16:34:49,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:49,169 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/recovered.edits/107.seqid, newMaxSeqId=107, maxSeqId=-1 2024-12-09T16:34:49,178 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733762089172"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733762089172"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733762089172"}]},"ts":"1733762089172"} 2024-12-09T16:34:49,178 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733762089172"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733762089172"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733762089172"}]},"ts":"1733762089172"} 2024-12-09T16:34:49,178 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733762089172"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733762089172"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733762089172"}]},"ts":"1733762089172"} 2024-12-09T16:34:49,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6b0538698171d50c35c5f6eb07bf4825, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b21241e15084e886f8038211bba8302b, ASSIGN}] 2024-12-09T16:34:49,203 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6b0538698171d50c35c5f6eb07bf4825, ASSIGN 2024-12-09T16:34:49,203 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b21241e15084e886f8038211bba8302b, ASSIGN 2024-12-09T16:34:49,203 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6b0538698171d50c35c5f6eb07bf4825, ASSIGN; state=SPLITTING_NEW, location=dfce6ce5b10c,39501,1733762062000; forceNewPlan=false, retain=false 2024-12-09T16:34:49,203 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b21241e15084e886f8038211bba8302b, ASSIGN; state=SPLITTING_NEW, location=dfce6ce5b10c,39501,1733762062000; forceNewPlan=false, retain=false 2024-12-09T16:34:49,354 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=b21241e15084e886f8038211bba8302b, regionState=OPENING, regionLocation=dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:49,354 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=6b0538698171d50c35c5f6eb07bf4825, regionState=OPENING, regionLocation=dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:49,357 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b21241e15084e886f8038211bba8302b, ASSIGN because future has completed 2024-12-09T16:34:49,358 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000}] 2024-12-09T16:34:49,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6b0538698171d50c35c5f6eb07bf4825, ASSIGN because future has completed 2024-12-09T16:34:49,360 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b0538698171d50c35c5f6eb07bf4825, server=dfce6ce5b10c,39501,1733762062000}] 2024-12-09T16:34:49,515 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 
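
The procedure lifecycle above follows a consistent parent/child pattern: SplitTableRegionProcedure (pid=7) spawns the two daughter ASSIGN procedures (pid=10, pid=11), each ASSIGN spawns a remote OpenRegionProcedure (pid=13, pid=12), and a parent is only resumed once its last outstanding child reports in ("Finished subprocedure pid=..., resume processing ppid=..."). Below is a minimal sketch of that wake-up discipline; the names are invented for illustration, not the real org.apache.hadoop.hbase.procedure2 API.

    import java.util.ArrayDeque;
    import java.util.Deque;

    // Minimal model of a procedure executor: a parent procedure is suspended
    // while its children run; finishing the last child resumes the parent.
    public class MiniProcedureExecutor {
        static class Proc {
            final int pid; final Integer ppid; final String name;
            int pendingChildren = 0;
            Proc(int pid, Integer ppid, String name) { this.pid = pid; this.ppid = ppid; this.name = name; }
        }

        private final Deque<Proc> runnables = new ArrayDeque<>();

        void submitChildren(Proc parent, Proc... children) {
            parent.pendingChildren = children.length;   // parent now waits on all children
            for (Proc c : children) runnables.add(c);
            System.out.printf("Initialized subprocedures for pid=%d%n", parent.pid);
        }

        void finish(Proc proc, Proc parent) {
            System.out.printf("Finished pid=%d, ppid=%s; %s%n", proc.pid, proc.ppid, proc.name);
            if (parent != null && --parent.pendingChildren == 0) {
                System.out.printf("Finished subprocedure pid=%d, resume processing ppid=%d%n",
                    proc.pid, parent.pid);
                runnables.add(parent);                  // wake the parent, as a PEWorker does
            }
        }

        public static void main(String[] args) {
            MiniProcedureExecutor exec = new MiniProcedureExecutor();
            Proc split = new Proc(7, null, "SplitTableRegionProcedure");
            Proc assignA = new Proc(10, 7, "TransitRegionStateProcedure ASSIGN daughterA");
            Proc assignB = new Proc(11, 7, "TransitRegionStateProcedure ASSIGN daughterB");
            exec.submitChildren(split, assignA, assignB);
            exec.finish(assignB, split);                // pid=11 completes first in the log
            exec.finish(assignA, split);                // last child resumes pid=7
        }
    }
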
2024-12-09T16:34:49,515 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => b21241e15084e886f8038211bba8302b, NAME => 'TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-09T16:34:49,516 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling b21241e15084e886f8038211bba8302b 2024-12-09T16:34:49,516 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:34:49,516 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for b21241e15084e886f8038211bba8302b 2024-12-09T16:34:49,516 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for b21241e15084e886f8038211bba8302b 2024-12-09T16:34:49,517 INFO [StoreOpener-b21241e15084e886f8038211bba8302b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b21241e15084e886f8038211bba8302b 2024-12-09T16:34:49,519 INFO [StoreOpener-b21241e15084e886f8038211bba8302b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b21241e15084e886f8038211bba8302b columnFamilyName info 2024-12-09T16:34:49,519 DEBUG [StoreOpener-b21241e15084e886f8038211bba8302b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:49,535 DEBUG [StoreOpener-b21241e15084e886f8038211bba8302b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816->hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76b73bfe42ed42db80e015aaecdea91c-top 2024-12-09T16:34:49,542 DEBUG [StoreOpener-b21241e15084e886f8038211bba8302b-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/TestLogRolling-testLogRolling=8e82fa94a813eccad63b97a5d7c1d816-76cecf5450df43c1abac9c3037e45e4a 2024-12-09T16:34:49,542 INFO [StoreOpener-b21241e15084e886f8038211bba8302b-1 {}] regionserver.HStore(327): Store=b21241e15084e886f8038211bba8302b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:34:49,543 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for b21241e15084e886f8038211bba8302b 2024-12-09T16:34:49,543 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b 2024-12-09T16:34:49,544 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:49,545 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b 2024-12-09T16:34:49,545 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for b21241e15084e886f8038211bba8302b 2024-12-09T16:34:49,545 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for b21241e15084e886f8038211bba8302b 2024-12-09T16:34:49,548 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for b21241e15084e886f8038211bba8302b 2024-12-09T16:34:49,549 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened b21241e15084e886f8038211bba8302b; next sequenceid=108; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=707519, jitterRate=-0.10034431517124176}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T16:34:49,549 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b21241e15084e886f8038211bba8302b 2024-12-09T16:34:49,549 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for b21241e15084e886f8038211bba8302b: Running coprocessor pre-open hook at 1733762089516Writing region info on filesystem at 1733762089516Initializing all the Stores at 1733762089517 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762089517Cleaning up temporary data from old regions at 1733762089545 (+28 ms)Running coprocessor post-open hooks at 1733762089549 (+4 ms)Region opened successfully at 1733762089549 2024-12-09T16:34:49,551 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., pid=12, masterSystemTime=1733762089510 2024-12-09T16:34:49,551 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store b21241e15084e886f8038211bba8302b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T16:34:49,551 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-09T16:34:49,552 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
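
Note the two storefile names the daughter's store opener loaded: a reference file named 76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816 (read as the "-top" half of the parent's file) and an HFileLink named TestLogRolling-testLogRolling=8e82fa94a813eccad63b97a5d7c1d816-76cecf5450df43c1abac9c3037e45e4a. Neither is a copy; both point back into the parent region's directory. A simplified sketch of how those two name forms are assembled (the real logic lives in HBase's Reference and HFileLink classes):

    // How the two storefile names above are formed (simplified).
    public class DaughterStoreFileNames {
        // Reference file: "<parentHFile>.<parentEncodedRegionName>", read as the
        // top or bottom half of the parent file depending on the split row.
        static String referenceName(String parentHFile, String parentRegion) {
            return parentHFile + "." + parentRegion;
        }

        // HFileLink: "<tableName>=<parentEncodedRegionName>-<parentHFile>",
        // pointing back at the parent's file without copying it.
        static String hfileLinkName(String table, String parentRegion, String parentHFile) {
            return table + "=" + parentRegion + "-" + parentHFile;
        }

        public static void main(String[] args) {
            String parent = "8e82fa94a813eccad63b97a5d7c1d816";
            System.out.println(referenceName("76b73bfe42ed42db80e015aaecdea91c", parent));
            System.out.println(hfileLinkName("TestLogRolling-testLogRolling", parent,
                "76cecf5450df43c1abac9c3037e45e4a"));
        }
    }
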
2024-12-09T16:34:49,553 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 2024-12-09T16:34:49,553 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1541): b21241e15084e886f8038211bba8302b/info is initiating minor compaction (all files) 2024-12-09T16:34:49,554 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b21241e15084e886f8038211bba8302b/info in TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 2024-12-09T16:34:49,554 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816->hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76b73bfe42ed42db80e015aaecdea91c-top, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/TestLogRolling-testLogRolling=8e82fa94a813eccad63b97a5d7c1d816-76cecf5450df43c1abac9c3037e45e4a] into tmpdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp, totalSize=97.4 K 2024-12-09T16:34:49,555 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733762073382 2024-12-09T16:34:49,556 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=8e82fa94a813eccad63b97a5d7c1d816-76cecf5450df43c1abac9c3037e45e4a, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1733762087675 2024-12-09T16:34:49,563 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=b21241e15084e886f8038211bba8302b, regionState=OPEN, openSeqNum=108, regionLocation=dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:49,563 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 2024-12-09T16:34:49,563 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 2024-12-09T16:34:49,564 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825. 
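
Two priorities appear in the compaction request above: -2147483648 (Integer.MIN_VALUE) on the initial mark, and -2147482648 (Integer.MIN_VALUE + 1000) once the store pins the request because the region is a freshly split daughter, which keeps its reference-file rewrite ahead of ordinary compactions in the queue. A sketch of that override; the constant names are assumptions, not HBase's identifiers:

    // The two priority values seen in the log, and the daughter-region override.
    public class CompactionPriority {
        static final int SPLIT_REGION_PRIORITY = Integer.MIN_VALUE;          // -2147483648
        static final int SPLIT_DAUGHTER_PRIORITY = Integer.MIN_VALUE + 1000; // -2147482648

        static int effectivePriority(boolean recentlySplitDaughter, int requested) {
            // A daughter of a recent split keeps the near-minimum priority so the
            // queue drains its reference files first; others keep what they asked for.
            return recentlySplitDaughter ? SPLIT_DAUGHTER_PRIORITY : requested;
        }

        public static void main(String[] args) {
            System.out.println(effectivePriority(true, SPLIT_REGION_PRIORITY)); // -2147482648
            System.out.println(effectivePriority(false, 14));                   // 14
        }
    }
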
2024-12-09T16:34:49,564 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 6b0538698171d50c35c5f6eb07bf4825, NAME => 'TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-09T16:34:49,564 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:34:49,564 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:34:49,564 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:34:49,564 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:34:49,566 INFO [StoreOpener-6b0538698171d50c35c5f6eb07bf4825-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:34:49,566 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-09T16:34:49,567 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
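
The flush decision logged by FlushAllLargeStoresPolicy works in two steps: flush only the column families whose memstores exceed a per-family lower bound, and if no family qualifies ("Since none of the CFs were above the size"), fall back to flushing all of them, which is why hbase:meta flushes 4/4 families for only ~5 KB of data. A sketch of that rule; the sizes approximate the per-family flush sizes reported further down, and the threshold is a made-up stand-in:

    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    // Select stores to flush: families over the lower bound, else all of them.
    public class FlushLargeStoresSketch {
        static List<String> selectStoresToFlush(Map<String, Long> memstoreBytes, long lowerBound) {
            List<String> large = memstoreBytes.entrySet().stream()
                .filter(e -> e.getValue() >= lowerBound)
                .map(Map.Entry::getKey)
                .collect(Collectors.toList());
            // "Since none of the CFs were above the size, flushing all."
            return large.isEmpty() ? List.copyOf(memstoreBytes.keySet()) : large;
        }

        public static void main(String[] args) {
            // Roughly the hbase:meta flush below: ~4.95 KB info, 74 B ns, 122 B table.
            Map<String, Long> families = Map.of("info", 5_073L, "ns", 74L, "table", 122L, "rep_barrier", 0L);
            System.out.println(selectStoresToFlush(families, 16 * 1024));
        }
    }
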
2024-12-09T16:34:49,567 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-12-09T16:34:49,567 INFO [StoreOpener-6b0538698171d50c35c5f6eb07bf4825-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6b0538698171d50c35c5f6eb07bf4825 columnFamilyName info 2024-12-09T16:34:49,567 DEBUG [StoreOpener-6b0538698171d50c35c5f6eb07bf4825-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:34:49,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000 because future has completed 2024-12-09T16:34:49,574 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-09T16:34:49,574 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000 in 213 msec 2024-12-09T16:34:49,580 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=b21241e15084e886f8038211bba8302b, ASSIGN in 373 msec 2024-12-09T16:34:49,583 DEBUG [StoreOpener-6b0538698171d50c35c5f6eb07bf4825-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816->hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76b73bfe42ed42db80e015aaecdea91c-bottom 2024-12-09T16:34:49,583 INFO [StoreOpener-6b0538698171d50c35c5f6eb07bf4825-1 {}] regionserver.HStore(327): Store=6b0538698171d50c35c5f6eb07bf4825/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T16:34:49,584 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b21241e15084e886f8038211bba8302b#info#compaction#63 average throughput is 22.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T16:34:49,584 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:34:49,585 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/691cdeab72b142cd87762587a9df0754 is 1080, key is row0062/info:/1733762085657/Put/seqid=0 2024-12-09T16:34:49,585 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:34:49,587 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:34:49,587 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:34:49,587 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:34:49,590 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:34:49,591 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 6b0538698171d50c35c5f6eb07bf4825; next sequenceid=108; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=737997, jitterRate=-0.061589181423187256}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T16:34:49,591 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:34:49,591 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 6b0538698171d50c35c5f6eb07bf4825: Running coprocessor pre-open hook at 1733762089564Writing region info on filesystem at 1733762089564Initializing all the Stores at 1733762089566 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762089566Cleaning up temporary data from old regions at 1733762089587 (+21 ms)Running coprocessor post-open hooks at 1733762089591 (+4 ms)Region opened successfully at 1733762089591 2024-12-09T16:34:49,592 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 
{event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825., pid=13, masterSystemTime=1733762089510 2024-12-09T16:34:49,593 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 6b0538698171d50c35c5f6eb07bf4825:info, priority=-2147483648, current under compaction store size is 2 2024-12-09T16:34:49,593 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-09T16:34:49,593 DEBUG [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-09T16:34:49,595 INFO [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825. 2024-12-09T16:34:49,595 DEBUG [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] regionserver.HStore(1541): 6b0538698171d50c35c5f6eb07bf4825/info is initiating minor compaction (all files) 2024-12-09T16:34:49,595 INFO [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 6b0538698171d50c35c5f6eb07bf4825/info in TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825. 2024-12-09T16:34:49,595 INFO [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816->hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76b73bfe42ed42db80e015aaecdea91c-bottom] into tmpdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/.tmp, totalSize=73.6 K 2024-12-09T16:34:49,597 DEBUG [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] compactions.Compactor(225): Compacting 76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733762073382 2024-12-09T16:34:49,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/info/012017ed9d50433890b9b6b31cd28c9e is 193, key is TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b./info:regioninfo/1733762089563/Put/seqid=0 2024-12-09T16:34:49,599 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=6b0538698171d50c35c5f6eb07bf4825, regionState=OPEN, openSeqNum=108, regionLocation=dfce6ce5b10c,39501,1733762062000 2024-12-09T16:34:49,601 DEBUG [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post 
open deploy task for TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825. 2024-12-09T16:34:49,601 INFO [RS_OPEN_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825. 2024-12-09T16:34:49,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b0538698171d50c35c5f6eb07bf4825, server=dfce6ce5b10c,39501,1733762062000 because future has completed 2024-12-09T16:34:49,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:49,609 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-12-09T16:34:49,609 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 6b0538698171d50c35c5f6eb07bf4825, server=dfce6ce5b10c,39501,1733762062000 in 246 msec 2024-12-09T16:34:49,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-12-09T16:34:49,613 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=6b0538698171d50c35c5f6eb07bf4825, ASSIGN in 408 msec 2024-12-09T16:34:49,616 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=8e82fa94a813eccad63b97a5d7c1d816, daughterA=6b0538698171d50c35c5f6eb07bf4825, daughterB=b21241e15084e886f8038211bba8302b in 1.3280 sec 2024-12-09T16:34:49,621 INFO [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6b0538698171d50c35c5f6eb07bf4825#info#compaction#65 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T16:34:49,622 DEBUG [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/.tmp/info/371dc60f912244428b9bd6d3203dfb0c is 1080, key is row0001/info:/1733762073382/Put/seqid=0 2024-12-09T16:34:49,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741849_1025 (size=28850) 2024-12-09T16:34:49,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741849_1025 (size=28850) 2024-12-09T16:34:49,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741850_1026 (size=9882) 2024-12-09T16:34:49,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741850_1026 (size=9882) 2024-12-09T16:34:49,627 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/info/012017ed9d50433890b9b6b31cd28c9e 2024-12-09T16:34:49,632 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/691cdeab72b142cd87762587a9df0754 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/691cdeab72b142cd87762587a9df0754 2024-12-09T16:34:49,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741851_1027 
(size=70862) 2024-12-09T16:34:49,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741851_1027 (size=70862) 2024-12-09T16:34:49,645 DEBUG [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/.tmp/info/371dc60f912244428b9bd6d3203dfb0c as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/info/371dc60f912244428b9bd6d3203dfb0c 2024-12-09T16:34:49,645 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 2 (all) file(s) in b21241e15084e886f8038211bba8302b/info of b21241e15084e886f8038211bba8302b into 691cdeab72b142cd87762587a9df0754(size=28.2 K), total size for store is 28.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T16:34:49,645 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b21241e15084e886f8038211bba8302b: 2024-12-09T16:34:49,645 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., storeName=b21241e15084e886f8038211bba8302b/info, priority=14, startTime=1733762089551; duration=0sec 2024-12-09T16:34:49,645 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T16:34:49,645 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b21241e15084e886f8038211bba8302b:info 2024-12-09T16:34:49,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/ns/fe0cb00d4fb0479bb432401f5a785191 is 43, key is default/ns:d/1733762063191/Put/seqid=0 2024-12-09T16:34:49,652 INFO [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 6b0538698171d50c35c5f6eb07bf4825/info of 6b0538698171d50c35c5f6eb07bf4825 into 371dc60f912244428b9bd6d3203dfb0c(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
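
Each "Committing <tmp file> as <store file>" pair above is the publish step of a flush or compaction: the new HFile is first written under the region's .tmp directory, then moved into the store directory with a single rename, so scanners never observe a partially written file. A local-filesystem analogy of that pattern (HBase performs the equivalent rename on HDFS):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    // Two-phase publish: write into .tmp, then rename into the store directory.
    public class CommitStoreFile {
        static Path commit(Path tmpFile, Path storeDir) throws IOException {
            Files.createDirectories(storeDir);
            Path target = storeDir.resolve(tmpFile.getFileName());
            // Atomic within one filesystem: the file appears fully formed or not at all.
            return Files.move(tmpFile, target, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path tmpDir = Files.createTempDirectory("region-tmp");
            Path storeDir = tmpDir.resolveSibling(tmpDir.getFileName() + "-info");
            Path flushed = Files.writeString(
                tmpDir.resolve("012017ed9d50433890b9b6b31cd28c9e"), "hfile bytes");
            System.out.println("Committed to " + commit(flushed, storeDir));
        }
    }
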
2024-12-09T16:34:49,652 DEBUG [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 6b0538698171d50c35c5f6eb07bf4825: 2024-12-09T16:34:49,652 INFO [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825., storeName=6b0538698171d50c35c5f6eb07bf4825/info, priority=15, startTime=1733762089593; duration=0sec 2024-12-09T16:34:49,652 DEBUG [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T16:34:49,652 DEBUG [RS:0;dfce6ce5b10c:39501-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6b0538698171d50c35c5f6eb07bf4825:info 2024-12-09T16:34:49,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741852_1028 (size=5153) 2024-12-09T16:34:49,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741852_1028 (size=5153) 2024-12-09T16:34:49,660 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/ns/fe0cb00d4fb0479bb432401f5a785191 2024-12-09T16:34:49,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/table/b1c846cb512340b4aa5e567dde8006b7 is 65, key is TestLogRolling-testLogRolling/table:state/1733762063697/Put/seqid=0 2024-12-09T16:34:49,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741853_1029 (size=5340) 2024-12-09T16:34:49,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741853_1029 (size=5340) 2024-12-09T16:34:49,684 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/table/b1c846cb512340b4aa5e567dde8006b7 2024-12-09T16:34:49,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/info/012017ed9d50433890b9b6b31cd28c9e as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/info/012017ed9d50433890b9b6b31cd28c9e 2024-12-09T16:34:49,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/info/012017ed9d50433890b9b6b31cd28c9e, entries=30, sequenceid=17, filesize=9.7 K 2024-12-09T16:34:49,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/ns/fe0cb00d4fb0479bb432401f5a785191 as 
hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/ns/fe0cb00d4fb0479bb432401f5a785191 2024-12-09T16:34:49,703 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/ns/fe0cb00d4fb0479bb432401f5a785191, entries=2, sequenceid=17, filesize=5.0 K 2024-12-09T16:34:49,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/table/b1c846cb512340b4aa5e567dde8006b7 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/table/b1c846cb512340b4aa5e567dde8006b7 2024-12-09T16:34:49,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/table/b1c846cb512340b4aa5e567dde8006b7, entries=2, sequenceid=17, filesize=5.2 K 2024-12-09T16:34:49,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 144ms, sequenceid=17, compaction requested=false 2024-12-09T16:34:49,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T16:34:50,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:50,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:50,603 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-12-09T16:34:51,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:51,545 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:51,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:51,772 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4. See HBASE-27595 for details.
2024-12-09T16:34:52,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:52,546 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:52,604 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:53,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
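
Each retry fails identically because the root cause is terminal: teardown has already closed the shared DFS client, so the probe is rejected before any RPC is attempted and the lease can never be observed as recovered. The guard that produces "Filesystem closed" behaves roughly like the following sketch (the clientRunning field is an assumption for illustration, not a quote of the Hadoop source):

    import java.io.IOException;

    class ClientGuardSketch {
        private volatile boolean clientRunning = true;

        void close() {
            clientRunning = false; // what closing the FileSystem amounts to
        }

        // Every client entry point runs a check like this first; after close()
        // it throws unconditionally, so the retries above cannot make progress.
        void checkOpen() throws IOException {
            if (!clientRunning) {
                throw new IOException("Filesystem closed");
            }
        }
    }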
2024-12-09T16:34:53,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:53,572 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,572 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,572 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,572 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,573 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,573 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,574 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:53,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,613 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:53,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,128 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T16:34:54,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,129 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,130 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,131 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,157 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,158 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,158 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,159 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,164 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:34:54,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
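
The HBase-Metrics2-1 flood is an analogous shutdown race: the metrics timer keeps sampling each DataNode's FsDatasetImpl after the mini-cluster has stopped its async disk service, so the sampler dereferences an executors map that shutdown has already nulled out. Schematically (the class and field names are modeled on the log message, not taken from the Hadoop source):

    import java.util.Map;
    import java.util.concurrent.ThreadPoolExecutor;

    class AsyncDiskMetricsSketch {
        private volatile Map<String, ThreadPoolExecutor> executors;

        void shutdown() {
            executors = null; // teardown; the metrics timer keeps firing anyway
        }

        long queuedOps() {
            Map<String, ThreadPoolExecutor> snapshot = executors;
            if (snapshot == null) {
                // Without this guard the sampler fails with the message above:
                // Cannot invoke "java.util.Map.values()" because "this.executors" is null
                return 0;
            }
            long total = 0;
            for (ThreadPoolExecutor e : snapshot.values()) {
                total += e.getQueue().size();
            }
            return total;
        }
    }

The interleaved MetricsConfig warning is routine in tests: no hadoop-metrics2-datanode.properties is on the classpath, so metrics2 falls back to its defaults.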
2024-12-09T16:34:54,547 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:54,606 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:55,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:55,548 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:55,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:56,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:56,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:56,607 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:57,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:34:57,549 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:34:57,608 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:34:57,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50968 deadline: 1733762107802, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. is not online on dfce6ce5b10c,39501,1733762062000
2024-12-09T16:34:57,803 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2, the old value is region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. is not online on dfce6ce5b10c,39501,1733762062000
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:34:57,803 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816. is not online on dfce6ce5b10c,39501,1733762062000
2024-12-09T16:34:57,803 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733762063281.8e82fa94a813eccad63b97a5d7c1d816., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=2 from cache
2024-12-09T16:34:58,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:34:58,550 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
11 more 2024-12-09T16:34:58,609 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:59,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:34:59,551 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:34:59,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:00,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:00,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:35:00,610 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:01,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:01,552 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:35:01,611 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:02,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:02,553 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:35:02,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:03,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:03,212 INFO [master/dfce6ce5b10c:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T16:35:03,212 INFO [master/dfce6ce5b10c:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T16:35:03,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:03,612 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:04,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:04,554 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:04,613 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:05,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:05,555 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:05,614 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:06,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-12-09T16:35:06,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:06,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:07,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:07,556 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:07,615 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:08,125 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375
2024-12-09T16:35:08,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:08,557 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:08,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:09,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:09,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:09,616 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:10,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:10,558 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:10,617 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
11 more 2024-12-09T16:35:10,809 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=4, created chunk count=9, reused chunk count=56, reuseRatio=86.15% 2024-12-09T16:35:10,810 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-09T16:35:11,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:11,559 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:11,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:12,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:12,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:12,618 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:13,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:13,560 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:13,619 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:14,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:14,561 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-12-09T16:35:17,919 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0084', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108] 2024-12-09T16:35:17,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b 2024-12-09T16:35:17,939 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T16:35:17,950 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/23536f4c0dd042a69187b83c79deb4e8 is 1080, key is row0084/info:/1733762117922/Put/seqid=0 2024-12-09T16:35:17,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741854_1030 (size=12509) 2024-12-09T16:35:17,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741854_1030 (size=12509) 2024-12-09T16:35:17,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/23536f4c0dd042a69187b83c79deb4e8 2024-12-09T16:35:17,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/23536f4c0dd042a69187b83c79deb4e8 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/23536f4c0dd042a69187b83c79deb4e8 2024-12-09T16:35:17,987 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/23536f4c0dd042a69187b83c79deb4e8, entries=7, sequenceid=118, filesize=12.2 K 2024-12-09T16:35:17,989 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6456 for b21241e15084e886f8038211bba8302b in 50ms, sequenceid=118, compaction requested=false 2024-12-09T16:35:17,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:35:18,640 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-12-09T16:35:19,197 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-12-09T16:35:19,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b 2024-12-09T16:35:19,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T16:35:19,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/3881775cc8944a9d94a35281315de290 is 1080, key is row0091/info:/1733762117942/Put/seqid=0 2024-12-09T16:35:19,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741855_1031 (size=12509) 2024-12-09T16:35:19,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741855_1031 (size=12509) 2024-12-09T16:35:20,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-09T16:35:20,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50968 deadline: 1733762130031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000 2024-12-09T16:35:20,033 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 , the old value is region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:35:20,033 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T16:35:20,033 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 because the exception is null or not the one we care about
11 more 2024-12-09T16:35:20,375 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/3881775cc8944a9d94a35281315de290 2024-12-09T16:35:20,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/3881775cc8944a9d94a35281315de290 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3881775cc8944a9d94a35281315de290 2024-12-09T16:35:20,403 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3881775cc8944a9d94a35281315de290, entries=7, sequenceid=128, filesize=12.2 K 2024-12-09T16:35:20,405 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for b21241e15084e886f8038211bba8302b in 446ms, sequenceid=128, compaction requested=true 2024-12-09T16:35:20,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b: 2024-12-09T16:35:20,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b21241e15084e886f8038211bba8302b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T16:35:20,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T16:35:20,405 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T16:35:20,407 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53868 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T16:35:20,407 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1541): b21241e15084e886f8038211bba8302b/info is initiating minor compaction (all files) 2024-12-09T16:35:20,408 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b21241e15084e886f8038211bba8302b/info in TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 
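The two compactions.* lines above show how this minor compaction was chosen: the exploring policy examined one window over the three eligible files and found the whole window "in ratio". As a rough illustration only (a simplified sketch of that ratio test, not the actual HBase implementation; the 1.2 default for hbase.hstore.compaction.ratio and the exact byte sizes, picked to add up to the logged total of 53868, are assumptions):

import java.util.List;

public class RatioCheckSketch {
    // hbase.hstore.compaction.ratio; 1.2 is the usual default (assumed here).
    static final double RATIO = 1.2;

    // True when no file in the candidate window is larger than RATIO times
    // the combined size of the other files in the window.
    static boolean filesInRatio(List<Long> sizes) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > RATIO * (total - size)) {
                return false; // one oversized file disqualifies the window
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three files above: 28.2 K + 12.2 K + 12.2 K = 53868 bytes.
        System.out.println(filesInRatio(List.of(28876L, 12496L, 12496L))); // true
    }
}

With these numbers the largest file (28876 bytes) is below 1.2 times the combined 24992 bytes of the other two, so the window qualifies and all three files are compacted into one, matching the "Completed compaction of 3 (all) file(s)" record that follows.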
2024-12-09T16:35:20,408 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/691cdeab72b142cd87762587a9df0754, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/23536f4c0dd042a69187b83c79deb4e8, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3881775cc8944a9d94a35281315de290] into tmpdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp, totalSize=52.6 K
2024-12-09T16:35:20,408 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 691cdeab72b142cd87762587a9df0754, keycount=22, bloomtype=ROW, size=28.2 K, encoding=NONE, compression=NONE, seqNum=104, earliestPutTs=1733762085657
2024-12-09T16:35:20,409 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 23536f4c0dd042a69187b83c79deb4e8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733762117922
2024-12-09T16:35:20,409 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3881775cc8944a9d94a35281315de290, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733762117942
2024-12-09T16:35:20,429 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b21241e15084e886f8038211bba8302b#info#compaction#70 average throughput is 18.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T16:35:20,430 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/0043fb12f6e94ceea43060ab40088782 is 1080, key is row0062/info:/1733762085657/Put/seqid=0
2024-12-09T16:35:20,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741856_1032 (size=44066)
2024-12-09T16:35:20,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741856_1032 (size=44066)
2024-12-09T16:35:20,460 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/0043fb12f6e94ceea43060ab40088782 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/0043fb12f6e94ceea43060ab40088782
2024-12-09T16:35:20,475 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b21241e15084e886f8038211bba8302b/info of b21241e15084e886f8038211bba8302b into 0043fb12f6e94ceea43060ab40088782(size=43.0 K), total size for store is 43.0 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T16:35:20,476 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:35:20,476 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., storeName=b21241e15084e886f8038211bba8302b/info, priority=13, startTime=1733762120405; duration=0sec
2024-12-09T16:35:20,476 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:35:20,476 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b21241e15084e886f8038211bba8302b:info
2024-12-09T16:35:20,565 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:35:20,623 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:35:21,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:21,566 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:21,624 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:21,773 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-09T16:35:22,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:22,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null
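Each of these WARN records comes from the same retry loop: while closing a WAL writer, RecoverLeaseFSUtils polls whether the file is already closed, and it reaches DistributedFileSystem.isFileClosed reflectively (hence the GeneratedMethodAccessor frames), so the underlying IOException always surfaces wrapped in an InvocationTargetException. A minimal sketch of that call pattern, assuming only a Hadoop FileSystem handle (this is not the actual RecoverLeaseFSUtils code):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedSketch {
    // Polls isFileClosed reflectively; the method is DFS-specific, so it is
    // looked up at runtime rather than called through the FileSystem base class.
    static boolean isFileClosed(FileSystem fs, Path p) {
        try {
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException e) {
            return false; // no such method on this FileSystem implementation
        } catch (IllegalAccessException | InvocationTargetException e) {
            // e.getCause() carries the real failure, e.g. the
            // "java.io.IOException: Filesystem closed" seen in the traces above.
            return false;
        }
    }
}

Because the helper reports "not closed yet" on failure, the caller sleeps and tries again, which is why the same three WAL files keep reappearing below at roughly one-second intervals.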
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:22,625 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:23,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:23,567 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:23,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:24,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:24,568 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:24,626 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:25,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:25,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:25,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:26,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:26,569 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:26,627 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:27,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:27,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-12-09T16:35:27,628 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:28,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:28,570 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:28,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:29,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:29,571 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:29,629 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:30,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b
2024-12-09T16:35:30,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB
2024-12-09T16:35:30,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/ce1e3777c224423e90e6198ca4bd32e8 is 1080, key is row0098/info:/1733762119961/Put/seqid=0
2024-12-09T16:35:30,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741857_1033 (size=29784)
2024-12-09T16:35:30,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741857_1033 (size=29784)
2024-12-09T16:35:30,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/ce1e3777c224423e90e6198ca4bd32e8
2024-12-09T16:35:30,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-09T16:35:30,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50968 deadline: 1733762140145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
2024-12-09T16:35:30,146 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 , the old value is region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
2024-12-09T16:35:30,146 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
2024-12-09T16:35:30,146 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 because the exception is null or not the one we care about
2024-12-09T16:35:30,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/ce1e3777c224423e90e6198ca4bd32e8 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ce1e3777c224423e90e6198ca4bd32e8
2024-12-09T16:35:30,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ce1e3777c224423e90e6198ca4bd32e8, entries=23, sequenceid=155, filesize=29.1 K
2024-12-09T16:35:30,157 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=6.30 KB/6456 for b21241e15084e886f8038211bba8302b in 25ms, sequenceid=155, compaction requested=false
2024-12-09T16:35:30,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:35:30,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:30,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:30,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:31,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:31,572 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:31,630 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:32,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:32,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:32,631 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:33,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:33,573 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:33,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:34,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
2024-12-09T16:35:34,516 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region b21241e15084e886f8038211bba8302b, had cached 0 bytes from a total of 73850
2024-12-09T16:35:34,564 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6b0538698171d50c35c5f6eb07bf4825, had cached 0 bytes from a total of 70862
2024-12-09T16:35:34,574 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
[The identical InvocationTargetException / "Filesystem closed" stack trace above accompanied each of the following lease-recovery retries; the traces are elided and only the WARN headers kept:]
2024-12-09T16:35:34,632 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:35,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:35,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:35,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:36,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:36,575 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:36,633 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:37,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:37,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:37,634 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:38,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:38,576 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:38,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:39,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:39,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:39,635 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:40,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b
2024-12-09T16:35:40,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T16:35:40,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
    [identical "Filesystem closed" stack trace elided]
2024-12-09T16:35:40,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 after 196162ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
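The "attempt=3 ... after 196162ms" line shows the shape of the recovery loop behind these warnings: RecoverLeaseFSUtils asks the NameNode to recover the dead writer's lease, then polls isFileClosed until the file is closed, backing off between attempts. A simplified sketch of that recover-then-poll pattern; the method name, parameters, and timeouts below are illustrative, not HBase's actual API:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class LeaseRecoverySketch {
      // Returns true once the NameNode reports the WAL file closed.
      static boolean recoverLease(DistributedFileSystem dfs, Path wal,
          long timeoutMs, long pauseMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        int attempt = 0;
        while (System.currentTimeMillis() < deadline) {
          attempt++;
          try {
            // Ask the NameNode to take the lease from the dead writer;
            // true means the file is already closed and safe to read.
            if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
              return true;
            }
          } catch (IOException e) {
            // This branch corresponds to the "Failed invocation" warnings
            // above: once the DFS client is closed every attempt fails the
            // same way, but the loop keeps retrying until its deadline.
            System.err.println("attempt=" + attempt + " failed for " + wal + ": " + e);
          }
          Thread.sleep(pauseMs); // back off before the next attempt
        }
        return false;
      }
    }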
2024-12-09T16:35:40,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/ed5932e2296649e3a0d06dd00b33f63e is 1080, key is row0121/info:/1733762130133/Put/seqid=0
2024-12-09T16:35:40,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741858_1034 (size=12516)
2024-12-09T16:35:40,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741858_1034 (size=12516)
2024-12-09T16:35:40,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/ed5932e2296649e3a0d06dd00b33f63e
2024-12-09T16:35:40,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/ed5932e2296649e3a0d06dd00b33f63e as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ed5932e2296649e3a0d06dd00b33f63e
2024-12-09T16:35:40,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ed5932e2296649e3a0d06dd00b33f63e, entries=7, sequenceid=165, filesize=12.2 K
2024-12-09T16:35:40,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for b21241e15084e886f8038211bba8302b in 21ms, sequenceid=165, compaction requested=true
2024-12-09T16:35:40,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:35:40,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b21241e15084e886f8038211bba8302b:info, priority=-2147483648, current under compaction store size is 1
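The flush above is a two-step commit: the flusher first writes the new HFile under the region's .tmp directory, then "commits" it by renaming it into the column family directory (the Committing ... as ... line). A rename is atomic on HDFS, so readers never observe a half-written store file. A sketch of the pattern; the class, method, and path handling are illustrative, not HBase's API:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class FlushCommitSketch {
      // Move a fully written temp file into the live store directory.
      // On HDFS the rename is atomic, so the file appears all at once.
      static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir)
          throws IOException {
        Path dst = new Path(familyDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {
          throw new IOException("Failed to commit " + tmpFile + " as " + dst);
        }
        return dst; // the store now serves reads from this file
      }
    }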
2024-12-09T16:35:40,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:35:40,224 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T16:35:40,225 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 86366 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T16:35:40,225 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1541): b21241e15084e886f8038211bba8302b/info is initiating minor compaction (all files)
2024-12-09T16:35:40,225 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b21241e15084e886f8038211bba8302b/info in TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.
2024-12-09T16:35:40,225 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/0043fb12f6e94ceea43060ab40088782, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ce1e3777c224423e90e6198ca4bd32e8, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ed5932e2296649e3a0d06dd00b33f63e] into tmpdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp, totalSize=84.3 K
2024-12-09T16:35:40,225 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0043fb12f6e94ceea43060ab40088782, keycount=36, bloomtype=ROW, size=43.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1733762085657
2024-12-09T16:35:40,226 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting ce1e3777c224423e90e6198ca4bd32e8, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1733762119961
2024-12-09T16:35:40,226 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed5932e2296649e3a0d06dd00b33f63e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733762130133
2024-12-09T16:35:40,236 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b21241e15084e886f8038211bba8302b#info#compaction#73 average throughput is 33.86 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
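The selection logged above picks all three flushed files (43.0 K, 29.1 K, 12.2 K) for a minor compaction. A toy ratio-based selection in the spirit of that decision; the real ExploringCompactionPolicy also explores permutations and enforces min/max file counts, so this is only an illustrative sketch. With the logged sizes and a ratio of 1.2 (a common default), the oldest file already qualifies (43.0 <= 1.2 * (29.1 + 12.2) = 49.56), so all three are selected, matching the "selected 3 files" line:

    import java.util.ArrayList;
    import java.util.List;

    final class RatioSelectionSketch {
      // Files are ordered oldest-first. A file becomes compactable once
      // it is no larger than `ratio` times the combined size of the
      // newer files; it and everything newer is then selected together.
      static List<Long> select(List<Long> sizesOldestFirst, double ratio) {
        for (int i = 0; i < sizesOldestFirst.size(); i++) {
          long sumNewer = 0L;
          for (int j = i + 1; j < sizesOldestFirst.size(); j++) {
            sumNewer += sizesOldestFirst.get(j);
          }
          if (sizesOldestFirst.get(i) <= ratio * sumNewer) {
            return new ArrayList<>(
                sizesOldestFirst.subList(i, sizesOldestFirst.size()));
          }
        }
        return new ArrayList<>(); // nothing worth compacting yet
      }
    }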
2024-12-09T16:35:40,236 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/1dcd1fe286d3461c93a024031a37bf2c is 1080, key is row0062/info:/1733762085657/Put/seqid=0
2024-12-09T16:35:40,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741859_1035 (size=76649)
2024-12-09T16:35:40,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741859_1035 (size=76649)
2024-12-09T16:35:40,245 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/1dcd1fe286d3461c93a024031a37bf2c as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/1dcd1fe286d3461c93a024031a37bf2c
2024-12-09T16:35:40,251 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b21241e15084e886f8038211bba8302b/info of b21241e15084e886f8038211bba8302b into 1dcd1fe286d3461c93a024031a37bf2c(size=74.9 K), total size for store is 74.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T16:35:40,251 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:35:40,251 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., storeName=b21241e15084e886f8038211bba8302b/info, priority=13, startTime=1733762140223; duration=0sec
2024-12-09T16:35:40,251 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:35:40,251 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b21241e15084e886f8038211bba8302b:info
2024-12-09T16:35:40,577 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
    [identical "Filesystem closed" stack trace elided]
[The identical "Filesystem closed" stack trace is elided from each of the following retries:]
2024-12-09T16:35:40,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:41,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:41,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:41,636 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:42,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:42,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b
2024-12-09T16:35:42,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T16:35:42,221 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/777a68b65f9f499091d1868cd7860da8 is 1080, key is row0128/info:/1733762140204/Put/seqid=0
2024-12-09T16:35:42,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741860_1036 (size=12516)
2024-12-09T16:35:42,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741860_1036 (size=12516)
2024-12-09T16:35:42,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/777a68b65f9f499091d1868cd7860da8
2024-12-09T16:35:42,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/777a68b65f9f499091d1868cd7860da8 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/777a68b65f9f499091d1868cd7860da8
2024-12-09T16:35:42,251 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/777a68b65f9f499091d1868cd7860da8, entries=7, sequenceid=176, filesize=12.2 K
2024-12-09T16:35:42,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for b21241e15084e886f8038211bba8302b in 35ms, sequenceid=176, compaction requested=false
2024-12-09T16:35:42,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:35:42,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b
2024-12-09T16:35:42,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-12-09T16:35:42,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/9bf76b563f3d418ea4f66d92d9ffd0c9 is 1080, key is row0135/info:/1733762142218/Put/seqid=0
2024-12-09T16:35:42,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741861_1037 (size=16828)
2024-12-09T16:35:42,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741861_1037 (size=16828)
2024-12-09T16:35:42,578 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:42,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:42,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/9bf76b563f3d418ea4f66d92d9ffd0c9
2024-12-09T16:35:42,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/9bf76b563f3d418ea4f66d92d9ffd0c9 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9bf76b563f3d418ea4f66d92d9ffd0c9
2024-12-09T16:35:42,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9bf76b563f3d418ea4f66d92d9ffd0c9, entries=11, sequenceid=190, filesize=16.4 K
2024-12-09T16:35:42,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=15.76 KB/16140 for b21241e15084e886f8038211bba8302b in 428ms, sequenceid=190, compaction requested=true
2024-12-09T16:35:42,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:35:42,681 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b21241e15084e886f8038211bba8302b:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T16:35:42,681 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:35:42,681 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T16:35:42,683 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 105993 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T16:35:42,683 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1541): b21241e15084e886f8038211bba8302b/info is initiating minor compaction (all files)
2024-12-09T16:35:42,683 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b21241e15084e886f8038211bba8302b/info in TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.
2024-12-09T16:35:42,683 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/1dcd1fe286d3461c93a024031a37bf2c, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/777a68b65f9f499091d1868cd7860da8, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9bf76b563f3d418ea4f66d92d9ffd0c9] into tmpdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp, totalSize=103.5 K
2024-12-09T16:35:42,684 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1dcd1fe286d3461c93a024031a37bf2c, keycount=66, bloomtype=ROW, size=74.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733762085657
2024-12-09T16:35:42,685 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 777a68b65f9f499091d1868cd7860da8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733762140204
2024-12-09T16:35:42,685 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9bf76b563f3d418ea4f66d92d9ffd0c9, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733762142218
2024-12-09T16:35:42,700 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b21241e15084e886f8038211bba8302b#info#compaction#76 average throughput is 28.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T16:35:42,700 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/802fcf4c7ece4811af30ed3e1aee5e60 is 1080, key is row0062/info:/1733762085657/Put/seqid=0
2024-12-09T16:35:42,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741862_1038 (size=96159)
2024-12-09T16:35:42,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741862_1038 (size=96159)
2024-12-09T16:35:43,113 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/802fcf4c7ece4811af30ed3e1aee5e60 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/802fcf4c7ece4811af30ed3e1aee5e60
2024-12-09T16:35:43,119 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b21241e15084e886f8038211bba8302b/info of b21241e15084e886f8038211bba8302b into 802fcf4c7ece4811af30ed3e1aee5e60(size=93.9 K), total size for store is 93.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T16:35:43,119 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:35:43,119 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., storeName=b21241e15084e886f8038211bba8302b/info, priority=13, startTime=1733762142681; duration=0sec
2024-12-09T16:35:43,119 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:35:43,119 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b21241e15084e886f8038211bba8302b:info
2024-12-09T16:35:43,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-12-09T16:35:43,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:43,637 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:44,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:44,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b
2024-12-09T16:35:44,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-12-09T16:35:44,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/427cc9d6bcab4f0eb8057a456df1f84f is 1080, key is row0146/info:/1733762142255/Put/seqid=0
2024-12-09T16:35:44,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741863_1039 (size=22238)
2024-12-09T16:35:44,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741863_1039 (size=22238)
2024-12-09T16:35:44,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-09T16:35:44,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50968 deadline: 1733762154334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
2024-12-09T16:35:44,335 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 , the old value is region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:35:44,335 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:35:44,335 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 because the exception is null or not the one we care about
2024-12-09T16:35:44,579 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:44,638 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:44,707 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/427cc9d6bcab4f0eb8057a456df1f84f
2024-12-09T16:35:44,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/427cc9d6bcab4f0eb8057a456df1f84f as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/427cc9d6bcab4f0eb8057a456df1f84f
2024-12-09T16:35:44,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/427cc9d6bcab4f0eb8057a456df1f84f, entries=16, sequenceid=210, filesize=21.7 K
2024-12-09T16:35:44,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=13.66 KB/13988 for b21241e15084e886f8038211bba8302b in 437ms, sequenceid=210, compaction requested=false
2024-12-09T16:35:44,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:35:45,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:45,580 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:45,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:46,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:46,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:46,639 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:47,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:47,581 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:47,640 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
2024-12-09T16:35:48,209 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:48,582 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:48,641 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:49,210 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:49,582 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:49,641 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:50,210 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:50,583 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:50,584 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 after 196156ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T16:35:50,642 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:50,642 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta after 196160ms
2024-12-09T16:35:51,211 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:51,584 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:51,643 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:51,773 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-09T16:35:52,211 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:52,585 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:52,643 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:53,125 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375
2024-12-09T16:35:53,212 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:53,586 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
2024-12-09T16:35:53,644 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
2024-12-09T16:35:54,212 WARN  [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
2024-12-09T16:35:54,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b
2024-12-09T16:35:54,433 INFO  [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=14.71 KB heapSize=16 KB
2024-12-09T16:35:54,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/9d10981efde646e4b5b18008dcd6c5f5 is 1080, key is row0162/info:/1733762144295/Put/seqid=0
2024-12-09T16:35:54,486 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741864_1040 (size=20078)
2024-12-09T16:35:54,487 INFO  [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741864_1040 (size=20078)
2024-12-09T16:35:54,487 INFO  [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/9d10981efde646e4b5b18008dcd6c5f5
2024-12-09T16:35:54,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/9d10981efde646e4b5b18008dcd6c5f5 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9d10981efde646e4b5b18008dcd6c5f5
2024-12-09T16:35:54,510 WARN  [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-12-09T16:35:54,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:50968 deadline: 1733762164510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
2024-12-09T16:35:54,511 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 , the old value is region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:35:54,511 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=b21241e15084e886f8038211bba8302b, server=dfce6ce5b10c,39501,1733762062000
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308)
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018)
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-09T16:35:54,511 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., hostname=dfce6ce5b10c,39501,1733762062000, seqNum=108 because the exception is null or not the one we care about
2024-12-09T16:35:54,522 INFO  [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9d10981efde646e4b5b18008dcd6c5f5, entries=14, sequenceid=227, filesize=19.6 K
2024-12-09T16:35:54,523 INFO  [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=15.76 KB/16140 for b21241e15084e886f8038211bba8302b in 90ms, sequenceid=227, compaction requested=true
2024-12-09T16:35:54,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:35:54,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b21241e15084e886f8038211bba8302b:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T16:35:54,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:35:54,524 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T16:35:54,525 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 138475 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T16:35:54,525 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b21241e15084e886f8038211bba8302b/info in TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.
2024-12-09T16:35:54,525 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/802fcf4c7ece4811af30ed3e1aee5e60, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/427cc9d6bcab4f0eb8057a456df1f84f, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9d10981efde646e4b5b18008dcd6c5f5] into tmpdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp, totalSize=135.2 K
2024-12-09T16:35:54,526 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 802fcf4c7ece4811af30ed3e1aee5e60, keycount=84, bloomtype=ROW, size=93.9 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733762085657
2024-12-09T16:35:54,526 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 427cc9d6bcab4f0eb8057a456df1f84f, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733762142255
2024-12-09T16:35:54,527 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9d10981efde646e4b5b18008dcd6c5f5, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1733762144295
2024-12-09T16:35:54,577 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b21241e15084e886f8038211bba8302b#info#compaction#79 average throughput is 38.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T16:35:54,578 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/3c4a8ed201694edf86c2df69e8d96899 is 1080, key is row0062/info:/1733762085657/Put/seqid=0
2024-12-09T16:35:54,586 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
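The Close-WAL-Writer warning that starts here is unrelated to the compaction: it is WAL lease recovery running against a mini-cluster whose DFS client has already been shut down, so DistributedFileSystem.isFileClosed throws "Filesystem closed". The call is made through reflection, which is why the logged exception is an InvocationTargetException with the real IOException only visible as its cause. A small sketch of that probe-by-reflection pattern, assuming only the public Hadoop FileSystem API; the class and method names here are illustrative, not the RecoverLeaseFSUtils source:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbe {
  // Probe FileSystem#isFileClosed reflectively so the caller still loads
  // against filesystem implementations that lack the method.
  static Boolean isFileClosed(FileSystem fs, Path p) {
    try {
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, p);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return null; // method absent or inaccessible on this implementation
    } catch (InvocationTargetException e) {
      // The real failure (here: java.io.IOException: Filesystem closed)
      // is the cause, not the InvocationTargetException itself.
      System.err.println("isFileClosed failed: " + e.getCause());
      return null;
    }
  }
}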
2024-12-09T16:35:54,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741865_1041 (size=128737)
2024-12-09T16:35:54,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741865_1041 (size=128737)
2024-12-09T16:35:54,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
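The two addStoredBlock lines are the namenode recording that both datanodes now hold the 128737-byte block of the freshly written compaction output, i.e. the block has reached replication factor 2. The same placement can be inspected from a client through the public FileSystem API; a minimal sketch, with the file path left to the caller:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockReplicas {
  // Print which hosts hold each block of a file, mirroring from the client
  // side what the namenode's addStoredBlock lines report above.
  public static void main(String[] args) throws Exception {
    Path p = new Path(args[0]); // e.g. an HFile path on the mini-cluster
    FileSystem fs = FileSystem.get(p.toUri(), new Configuration());
    FileStatus st = fs.getFileStatus(p);
    for (BlockLocation bl : fs.getFileBlockLocations(st, 0, st.getLen())) {
      System.out.println(bl.getOffset() + "-" + (bl.getOffset() + bl.getLength())
          + " on " + String.join(",", bl.getHosts()));
    }
  }
}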
2024-12-09T16:35:54,655 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/3c4a8ed201694edf86c2df69e8d96899 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3c4a8ed201694edf86c2df69e8d96899
2024-12-09T16:35:54,665 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b21241e15084e886f8038211bba8302b/info of b21241e15084e886f8038211bba8302b into 3c4a8ed201694edf86c2df69e8d96899(size=125.7 K), total size for store is 125.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T16:35:54,665 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:35:54,665 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., storeName=b21241e15084e886f8038211bba8302b/info, priority=13, startTime=1733762154524; duration=0sec
2024-12-09T16:35:54,665 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:35:54,665 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b21241e15084e886f8038211bba8302b:info
2024-12-09T16:35:55,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
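The commit step above is the standard HDFS publish idiom: the compactor writes the new HFile under the region's .tmp directory, then renames it into the live store directory; only after the rename do readers switch to the 125.7 K output while the three inputs become eligible for archival. Renames within a single HDFS namespace are atomic, which is what makes this safe. A bare-bones sketch of the idiom, with hypothetical paths supplied by the caller:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitByRename {
  // Publish pattern: write under .tmp, then atomically rename into the live
  // store directory so readers never observe a partially written HFile.
  static void commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dst = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new IOException("failed to commit " + tmpFile + " to " + dst);
    }
  }
}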
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:55,587 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:55,645 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:56,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:56,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:56,646 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:57,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:57,588 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:57,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:58,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:58,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:58,647 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:59,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:59,589 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:35:59,648 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:36:00,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:36:00,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:36:00,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:36:01,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:36:01,590 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T16:36:01,649 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:02,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:02,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:02,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:03,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:03,591 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:03,650 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:04,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:04,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b
2024-12-09T16:36:04,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB
2024-12-09T16:36:04,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/365b06ff738443e0a7995ecb8dd0aaf0 is 1080, key is row0176/info:/1733762154435/Put/seqid=0
2024-12-09T16:36:04,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741866_1042 (size=22238)
2024-12-09T16:36:04,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741866_1042 (size=22238)
2024-12-09T16:36:04,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:04,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/365b06ff738443e0a7995ecb8dd0aaf0
2024-12-09T16:36:04,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/365b06ff738443e0a7995ecb8dd0aaf0 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/365b06ff738443e0a7995ecb8dd0aaf0
2024-12-09T16:36:04,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/365b06ff738443e0a7995ecb8dd0aaf0, entries=16, sequenceid=247, filesize=21.7 K
2024-12-09T16:36:04,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=1.05 KB/1076 for b21241e15084e886f8038211bba8302b in 22ms, sequenceid=247, compaction requested=false
2024-12-09T16:36:04,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:36:04,651 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:05,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:05,592 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:05,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:06,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:06,593 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:06,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b
2024-12-09T16:36:06,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T16:36:06,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/da94af8a6ae24b9284a14ccd35e78ba6 is 1080, key is row0192/info:/1733762164583/Put/seqid=0
2024-12-09T16:36:06,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741867_1043 (size=12517)
2024-12-09T16:36:06,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/da94af8a6ae24b9284a14ccd35e78ba6
2024-12-09T16:36:06,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741867_1043 (size=12517)
2024-12-09T16:36:06,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/da94af8a6ae24b9284a14ccd35e78ba6 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/da94af8a6ae24b9284a14ccd35e78ba6
2024-12-09T16:36:06,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/da94af8a6ae24b9284a14ccd35e78ba6, entries=7, sequenceid=257, filesize=12.2 K
2024-12-09T16:36:06,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for b21241e15084e886f8038211bba8302b in 25ms, sequenceid=257, compaction requested=true
2024-12-09T16:36:06,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:36:06,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b21241e15084e886f8038211bba8302b:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T16:36:06,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:36:06,622 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T16:36:06,623 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 163492 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T16:36:06,623 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1541): b21241e15084e886f8038211bba8302b/info is initiating minor compaction (all files)
2024-12-09T16:36:06,624 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b21241e15084e886f8038211bba8302b/info in TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.
2024-12-09T16:36:06,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b
2024-12-09T16:36:06,624 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3c4a8ed201694edf86c2df69e8d96899, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/365b06ff738443e0a7995ecb8dd0aaf0, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/da94af8a6ae24b9284a14ccd35e78ba6] into tmpdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp, totalSize=159.7 K
2024-12-09T16:36:06,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB
2024-12-09T16:36:06,624 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3c4a8ed201694edf86c2df69e8d96899, keycount=114, bloomtype=ROW, size=125.7 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1733762085657
2024-12-09T16:36:06,625 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 365b06ff738443e0a7995ecb8dd0aaf0, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1733762154435
2024-12-09T16:36:06,625 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting da94af8a6ae24b9284a14ccd35e78ba6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1733762164583
2024-12-09T16:36:06,628 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/9f900077a3cb44e78564b6aa8ff16363 is 1080, key is row0199/info:/1733762166599/Put/seqid=0
2024-12-09T16:36:06,652 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:06,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741868_1044 (size=16839)
2024-12-09T16:36:06,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741868_1044 (size=16839)
2024-12-09T16:36:06,657 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/9f900077a3cb44e78564b6aa8ff16363
2024-12-09T16:36:06,658 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b21241e15084e886f8038211bba8302b#info#compaction#83 average throughput is 35.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T16:36:06,659 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/d3bd3f1be2d34966930e2bdc204d4f07 is 1080, key is row0062/info:/1733762085657/Put/seqid=0
2024-12-09T16:36:06,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741869_1045 (size=153707)
2024-12-09T16:36:06,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741869_1045 (size=153707)
2024-12-09T16:36:06,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/9f900077a3cb44e78564b6aa8ff16363 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9f900077a3cb44e78564b6aa8ff16363
2024-12-09T16:36:06,674 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/d3bd3f1be2d34966930e2bdc204d4f07 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/d3bd3f1be2d34966930e2bdc204d4f07
2024-12-09T16:36:06,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9f900077a3cb44e78564b6aa8ff16363, entries=11, sequenceid=271, filesize=16.4 K
2024-12-09T16:36:06,676 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=15.76 KB/16140 for b21241e15084e886f8038211bba8302b in 51ms, sequenceid=271, compaction requested=false
2024-12-09T16:36:06,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:36:06,681 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b21241e15084e886f8038211bba8302b/info of b21241e15084e886f8038211bba8302b into d3bd3f1be2d34966930e2bdc204d4f07(size=150.1 K), total size for store is 166.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T16:36:06,681 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b21241e15084e886f8038211bba8302b:
2024-12-09T16:36:06,681 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., storeName=b21241e15084e886f8038211bba8302b/info, priority=13, startTime=1733762166622; duration=0sec
2024-12-09T16:36:06,681 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T16:36:06,681 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b21241e15084e886f8038211bba8302b:info
2024-12-09T16:36:07,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:07,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:07,653 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:08,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:08,594 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
2024-12-09T16:36:08,654 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null (stack trace identical to the 16:36:01,590 entry above; root cause: java.io.IOException: Filesystem closed)
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:36:08,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b 2024-12-09T16:36:08,668 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-09T16:36:08,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/86b35e5d02d74fbb9dab6d6f616bfe6f is 1080, key is row0210/info:/1733762166625/Put/seqid=0 2024-12-09T16:36:08,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741870_1046 (size=22254) 2024-12-09T16:36:08,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741870_1046 (size=22254) 2024-12-09T16:36:08,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/86b35e5d02d74fbb9dab6d6f616bfe6f 2024-12-09T16:36:08,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/86b35e5d02d74fbb9dab6d6f616bfe6f as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/86b35e5d02d74fbb9dab6d6f616bfe6f 2024-12-09T16:36:08,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/86b35e5d02d74fbb9dab6d6f616bfe6f, entries=16, sequenceid=291, filesize=21.7 K 2024-12-09T16:36:08,693 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, 
currentSize=10.51 KB/10760 for b21241e15084e886f8038211bba8302b in 24ms, sequenceid=291, compaction requested=true 2024-12-09T16:36:08,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b: 2024-12-09T16:36:08,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b21241e15084e886f8038211bba8302b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T16:36:08,693 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T16:36:08,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T16:36:08,694 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192800 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T16:36:08,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b 2024-12-09T16:36:08,694 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1541): b21241e15084e886f8038211bba8302b/info is initiating minor compaction (all files) 2024-12-09T16:36:08,694 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b21241e15084e886f8038211bba8302b/info in TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 2024-12-09T16:36:08,694 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-12-09T16:36:08,694 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/d3bd3f1be2d34966930e2bdc204d4f07, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9f900077a3cb44e78564b6aa8ff16363, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/86b35e5d02d74fbb9dab6d6f616bfe6f] into tmpdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp, totalSize=188.3 K 2024-12-09T16:36:08,695 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3bd3f1be2d34966930e2bdc204d4f07, keycount=137, bloomtype=ROW, size=150.1 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1733762085657 2024-12-09T16:36:08,696 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f900077a3cb44e78564b6aa8ff16363, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1733762166599 2024-12-09T16:36:08,696 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 86b35e5d02d74fbb9dab6d6f616bfe6f, keycount=16, 
bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733762166625 2024-12-09T16:36:08,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/3af190e92ed047468c57bf694ffed749 is 1080, key is row0226/info:/1733762168670/Put/seqid=0 2024-12-09T16:36:08,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741871_1047 (size=16839) 2024-12-09T16:36:08,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741871_1047 (size=16839) 2024-12-09T16:36:08,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/3af190e92ed047468c57bf694ffed749 2024-12-09T16:36:08,713 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b21241e15084e886f8038211bba8302b#info#compaction#86 average throughput is 56.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T16:36:08,714 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/b3cbda4500a94afab8a7a3af9b5c0356 is 1080, key is row0062/info:/1733762085657/Put/seqid=0 2024-12-09T16:36:08,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/3af190e92ed047468c57bf694ffed749 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3af190e92ed047468c57bf694ffed749 2024-12-09T16:36:08,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741872_1048 (size=182950) 2024-12-09T16:36:08,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741872_1048 (size=182950) 2024-12-09T16:36:08,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3af190e92ed047468c57bf694ffed749, entries=11, sequenceid=305, filesize=16.4 K 2024-12-09T16:36:08,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for b21241e15084e886f8038211bba8302b in 37ms, sequenceid=305, compaction requested=false 2024-12-09T16:36:08,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b: 2024-12-09T16:36:08,733 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39501 {}] regionserver.HRegion(8855): Flush requested on b21241e15084e886f8038211bba8302b 2024-12-09T16:36:08,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-09T16:36:08,735 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/b3cbda4500a94afab8a7a3af9b5c0356 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/b3cbda4500a94afab8a7a3af9b5c0356 2024-12-09T16:36:08,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/0fc4b4d1c4384856a83721ae367e66b8 is 1080, key is row0237/info:/1733762168697/Put/seqid=0 2024-12-09T16:36:08,747 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b21241e15084e886f8038211bba8302b/info of b21241e15084e886f8038211bba8302b into b3cbda4500a94afab8a7a3af9b5c0356(size=178.7 K), total size for store is 195.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T16:36:08,747 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b21241e15084e886f8038211bba8302b: 2024-12-09T16:36:08,748 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., storeName=b21241e15084e886f8038211bba8302b/info, priority=13, startTime=1733762168693; duration=0sec 2024-12-09T16:36:08,748 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T16:36:08,748 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b21241e15084e886f8038211bba8302b:info 2024-12-09T16:36:08,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741873_1049 (size=17918) 2024-12-09T16:36:08,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741873_1049 (size=17918) 2024-12-09T16:36:08,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/0fc4b4d1c4384856a83721ae367e66b8 2024-12-09T16:36:08,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/0fc4b4d1c4384856a83721ae367e66b8 as 
hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/0fc4b4d1c4384856a83721ae367e66b8 2024-12-09T16:36:08,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/0fc4b4d1c4384856a83721ae367e66b8, entries=12, sequenceid=320, filesize=17.5 K 2024-12-09T16:36:08,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=8.41 KB/8608 for b21241e15084e886f8038211bba8302b in 47ms, sequenceid=320, compaction requested=true 2024-12-09T16:36:08,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b: 2024-12-09T16:36:08,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b21241e15084e886f8038211bba8302b:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T16:36:08,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T16:36:08,781 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T16:36:08,782 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 217707 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T16:36:08,782 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1541): b21241e15084e886f8038211bba8302b/info is initiating minor compaction (all files) 2024-12-09T16:36:08,782 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of b21241e15084e886f8038211bba8302b/info in TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 
2024-12-09T16:36:08,782 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/b3cbda4500a94afab8a7a3af9b5c0356, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3af190e92ed047468c57bf694ffed749, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/0fc4b4d1c4384856a83721ae367e66b8] into tmpdir=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp, totalSize=212.6 K 2024-12-09T16:36:08,783 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting b3cbda4500a94afab8a7a3af9b5c0356, keycount=164, bloomtype=ROW, size=178.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1733762085657 2024-12-09T16:36:08,783 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3af190e92ed047468c57bf694ffed749, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733762168670 2024-12-09T16:36:08,784 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0fc4b4d1c4384856a83721ae367e66b8, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1733762168697 2024-12-09T16:36:08,799 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b21241e15084e886f8038211bba8302b#info#compaction#88 average throughput is 63.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T16:36:08,800 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/08119dead4e8439fa6f12ac398f48cbd is 1080, key is row0062/info:/1733762085657/Put/seqid=0 2024-12-09T16:36:08,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741874_1050 (size=207946) 2024-12-09T16:36:08,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741874_1050 (size=207946) 2024-12-09T16:36:08,843 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/08119dead4e8439fa6f12ac398f48cbd as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/08119dead4e8439fa6f12ac398f48cbd 2024-12-09T16:36:08,871 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in b21241e15084e886f8038211bba8302b/info of b21241e15084e886f8038211bba8302b into 08119dead4e8439fa6f12ac398f48cbd(size=203.1 K), total size for store is 203.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T16:36:08,871 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for b21241e15084e886f8038211bba8302b: 2024-12-09T16:36:08,871 INFO [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., storeName=b21241e15084e886f8038211bba8302b/info, priority=13, startTime=1733762168781; duration=0sec 2024-12-09T16:36:08,871 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T16:36:08,871 DEBUG [RS:0;dfce6ce5b10c:39501-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b21241e15084e886f8038211bba8302b:info
2024-12-09T16:36:10,760 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-12-09T16:36:10,761 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C39501%2C1733762062000.1733762170760 2024-12-09T16:36:10,776 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:10,777 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:10,777 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:10,777 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:10,777 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:10,777 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000/dfce6ce5b10c%2C39501%2C1733762062000.1733762062718 with entries=311, filesize=307.98 KB; new WAL /user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000/dfce6ce5b10c%2C39501%2C1733762062000.1733762170760 2024-12-09T16:36:10,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741833_1009 (size=315382) 2024-12-09T16:36:10,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741833_1009 (size=315382) 2024-12-09T16:36:10,779 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44167:44167),(127.0.0.1/127.0.0.1:41691:41691)] 2024-12-09T16:36:10,779 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000/dfce6ce5b10c%2C39501%2C1733762062000.1733762062718 is not closed yet, will try archiving it next time 2024-12-09T16:36:10,782 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 6b0538698171d50c35c5f6eb07bf4825: 2024-12-09T16:36:10,782 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing b21241e15084e886f8038211bba8302b 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-12-09T16:36:10,786 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/77d59e895da34b0ab181d3dde6eb89d4 is 1080, key is row0249/info:/1733762168735/Put/seqid=0 2024-12-09T16:36:10,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741876_1052 (size=13602) 2024-12-09T16:36:10,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741876_1052 (size=13602) 2024-12-09T16:36:10,794 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/77d59e895da34b0ab181d3dde6eb89d4 2024-12-09T16:36:10,800 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing
hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/.tmp/info/77d59e895da34b0ab181d3dde6eb89d4 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/77d59e895da34b0ab181d3dde6eb89d4 2024-12-09T16:36:10,806 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/77d59e895da34b0ab181d3dde6eb89d4, entries=8, sequenceid=333, filesize=13.3 K 2024-12-09T16:36:10,807 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=0 B/0 for b21241e15084e886f8038211bba8302b in 25ms, sequenceid=333, compaction requested=false 2024-12-09T16:36:10,807 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for b21241e15084e886f8038211bba8302b: 2024-12-09T16:36:10,807 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-12-09T16:36:10,812 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/info/20b105c7af8a42719d79fdd299123130 is 186, key is TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825./info:regioninfo/1733762089598/Put/seqid=0 2024-12-09T16:36:10,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741877_1053 (size=6153) 2024-12-09T16:36:10,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741877_1053 (size=6153) 2024-12-09T16:36:10,819 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/info/20b105c7af8a42719d79fdd299123130 2024-12-09T16:36:10,826 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/.tmp/info/20b105c7af8a42719d79fdd299123130 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/info/20b105c7af8a42719d79fdd299123130 2024-12-09T16:36:10,833 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/info/20b105c7af8a42719d79fdd299123130, entries=5, sequenceid=21, filesize=6.0 K 2024-12-09T16:36:10,834 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 27ms, sequenceid=21, compaction requested=false 2024-12-09T16:36:10,834 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T16:36:10,835 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
dfce6ce5b10c%2C39501%2C1733762062000.1733762170834 2024-12-09T16:36:10,842 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:10,842 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:10,842 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:10,842 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:10,842 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:10,842 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000/dfce6ce5b10c%2C39501%2C1733762062000.1733762170760 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000/dfce6ce5b10c%2C39501%2C1733762062000.1733762170834 2024-12-09T16:36:10,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741875_1051 (size=731) 2024-12-09T16:36:10,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741875_1051 (size=731) 2024-12-09T16:36:10,850 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000/dfce6ce5b10c%2C39501%2C1733762062000.1733762062718 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/oldWALs/dfce6ce5b10c%2C39501%2C1733762062000.1733762062718 2024-12-09T16:36:10,850 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41691:41691),(127.0.0.1/127.0.0.1:44167:44167)] 2024-12-09T16:36:10,851 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T16:36:10,851 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T16:36:10,851 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T16:36:10,851 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T16:36:10,851 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:36:10,851 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/WALs/dfce6ce5b10c,39501,1733762062000/dfce6ce5b10c%2C39501%2C1733762062000.1733762170760 to 
hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/oldWALs/dfce6ce5b10c%2C39501%2C1733762062000.1733762170760 2024-12-09T16:36:10,852 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:36:10,852 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T16:36:10,852 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T16:36:10,852 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=384552494, stopped=false 2024-12-09T16:36:10,852 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=dfce6ce5b10c,45079,1733762061826 2024-12-09T16:36:10,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T16:36:10,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T16:36:10,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:36:10,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:36:10,895 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T16:36:10,895 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T16:36:10,896 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T16:36:10,896 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:36:10,896 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:36:10,896 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'dfce6ce5b10c,39501,1733762062000' ***** 2024-12-09T16:36:10,896 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T16:36:10,896 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T16:36:10,896 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T16:36:10,897 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(3091): Received CLOSE for 6b0538698171d50c35c5f6eb07bf4825 2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(3091): Received CLOSE for b21241e15084e886f8038211bba8302b 2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(959): stopping server dfce6ce5b10c,39501,1733762062000 2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T16:36:10,897 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6b0538698171d50c35c5f6eb07bf4825, disabling compactions & flushes 2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;dfce6ce5b10c:39501. 2024-12-09T16:36:10,897 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825. 
2024-12-09T16:36:10,897 DEBUG [RS:0;dfce6ce5b10c:39501 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T16:36:10,897 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825. 2024-12-09T16:36:10,897 DEBUG [RS:0;dfce6ce5b10c:39501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:36:10,897 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825. after waiting 0 ms 2024-12-09T16:36:10,897 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825. 2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T16:36:10,897 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-09T16:36:10,897 DEBUG [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(1325): Online Regions={6b0538698171d50c35c5f6eb07bf4825=TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825., b21241e15084e886f8038211bba8302b=TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T16:36:10,898 DEBUG [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6b0538698171d50c35c5f6eb07bf4825, b21241e15084e886f8038211bba8302b 2024-12-09T16:36:10,898 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T16:36:10,898 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T16:36:10,898 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T16:36:10,898 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T16:36:10,898 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T16:36:10,898 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816->hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76b73bfe42ed42db80e015aaecdea91c-bottom] to archive 2024-12-09T16:36:10,899 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T16:36:10,901 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:36:10,901 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=dfce6ce5b10c:45079 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    ... 16 more
2024-12-09T16:36:10,901 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825.-1 {}] regionserver.HStore(2414): Failed to report archival of files: []
2024-12-09T16:36:10,915 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1
2024-12-09T16:36:10,915 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/6b0538698171d50c35c5f6eb07bf4825/recovered.edits/112.seqid, newMaxSeqId=112, maxSeqId=107
2024-12-09T16:36:10,916 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:36:10,916 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825.
2024-12-09T16:36:10,916 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T16:36:10,916 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6b0538698171d50c35c5f6eb07bf4825: Waiting for close lock at 1733762170897Running coprocessor pre-close hooks at 1733762170897Disabling compacts and flushes for region at 1733762170897Disabling writes for close at 1733762170897Writing region close event to WAL at 1733762170911 (+14 ms)Running coprocessor post-close hooks at 1733762170915 (+4 ms)Closed at 1733762170916 (+1 ms)
2024-12-09T16:36:10,916 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733762170897Running coprocessor pre-close hooks at 1733762170897Disabling compacts and flushes for region at 1733762170898 (+1 ms)Disabling writes for close at 1733762170898Writing region close event to WAL at 1733762170911 (+13 ms)Running coprocessor post-close hooks at 1733762170916 (+5 ms)Closed at 1733762170916
2024-12-09T16:36:10,916 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T16:36:10,916 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733762088285.6b0538698171d50c35c5f6eb07bf4825.
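The StoppedRpcClientException above is expected during shutdown: the store closer tries to report archived files to the master after this server has already stopped its own RPC client, so the report fails locally and is merely logged ("Failed to report archival of files: []"). A hedged sketch of such best-effort reporting, with a hypothetical interface standing in for the master RPC stub:

    import java.util.List;

    // Hypothetical best-effort reporter: during shutdown the RPC client may already
    // be stopped, so a failed report is logged and skipped rather than escalated.
    final class ArchivalReporter {
        interface MasterRpc { void reportFileArchival(List<String> files) throws Exception; }

        private final MasterRpc master;
        ArchivalReporter(MasterRpc master) { this.master = master; }

        void reportBestEffort(List<String> archivedFiles) {
            try {
                master.reportFileArchival(archivedFiles);
            } catch (Exception e) {
                // Mirrors the WARN above: the archival itself already succeeded on
                // HDFS, only the quota bookkeeping report is lost and can be retried.
                System.err.println("Failed to report archival of files: " + archivedFiles
                    + " (" + e + ")");
            }
        }
    }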
2024-12-09T16:36:10,916 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b21241e15084e886f8038211bba8302b, disabling compactions & flushes 2024-12-09T16:36:10,916 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 2024-12-09T16:36:10,916 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 2024-12-09T16:36:10,916 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. after waiting 0 ms 2024-12-09T16:36:10,916 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 2024-12-09T16:36:10,917 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816->hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/8e82fa94a813eccad63b97a5d7c1d816/info/76b73bfe42ed42db80e015aaecdea91c-top, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/691cdeab72b142cd87762587a9df0754, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/TestLogRolling-testLogRolling=8e82fa94a813eccad63b97a5d7c1d816-76cecf5450df43c1abac9c3037e45e4a, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/23536f4c0dd042a69187b83c79deb4e8, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/0043fb12f6e94ceea43060ab40088782, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3881775cc8944a9d94a35281315de290, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ce1e3777c224423e90e6198ca4bd32e8, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/1dcd1fe286d3461c93a024031a37bf2c, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ed5932e2296649e3a0d06dd00b33f63e, 
hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/777a68b65f9f499091d1868cd7860da8, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/802fcf4c7ece4811af30ed3e1aee5e60, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9bf76b563f3d418ea4f66d92d9ffd0c9, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/427cc9d6bcab4f0eb8057a456df1f84f, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3c4a8ed201694edf86c2df69e8d96899, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9d10981efde646e4b5b18008dcd6c5f5, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/365b06ff738443e0a7995ecb8dd0aaf0, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/d3bd3f1be2d34966930e2bdc204d4f07, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/da94af8a6ae24b9284a14ccd35e78ba6, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9f900077a3cb44e78564b6aa8ff16363, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/b3cbda4500a94afab8a7a3af9b5c0356, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/86b35e5d02d74fbb9dab6d6f616bfe6f, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3af190e92ed047468c57bf694ffed749, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/0fc4b4d1c4384856a83721ae367e66b8] to archive 2024-12-09T16:36:10,918 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
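Each of the "Archived from FileableStoreFile" records that follow moves a store file from under data/ to the same relative path under archive/, preserving the namespace/table/region/family layout. A simplified sketch of that mirrored-path move using the standard Hadoop FileSystem API (the real HFileArchiver also handles name collisions, retries, and partial failures):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch only: rename a store file from <root>/data/<rel> to <root>/archive/data/<rel>.
    final class ArchiveMoveExample {
        static Path archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
            String dataPrefix = new Path(rootDir, "data").toUri().getPath();
            String relative = storeFile.toUri().getPath().substring(dataPrefix.length() + 1);
            Path target = new Path(new Path(rootDir, "archive/data"), relative);
            fs.mkdirs(target.getParent());       // ensure the archive directory exists
            if (!fs.rename(storeFile, target)) { // metadata-only move on HDFS, no data copy
                throw new IOException("rename failed for " + storeFile);
            }
            return target;
        }
    }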
2024-12-09T16:36:10,919 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/76b73bfe42ed42db80e015aaecdea91c.8e82fa94a813eccad63b97a5d7c1d816 2024-12-09T16:36:10,921 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/691cdeab72b142cd87762587a9df0754 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/691cdeab72b142cd87762587a9df0754 2024-12-09T16:36:10,922 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/TestLogRolling-testLogRolling=8e82fa94a813eccad63b97a5d7c1d816-76cecf5450df43c1abac9c3037e45e4a to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/TestLogRolling-testLogRolling=8e82fa94a813eccad63b97a5d7c1d816-76cecf5450df43c1abac9c3037e45e4a 2024-12-09T16:36:10,923 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/23536f4c0dd042a69187b83c79deb4e8 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/23536f4c0dd042a69187b83c79deb4e8 2024-12-09T16:36:10,925 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/0043fb12f6e94ceea43060ab40088782 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/0043fb12f6e94ceea43060ab40088782 2024-12-09T16:36:10,926 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3881775cc8944a9d94a35281315de290 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3881775cc8944a9d94a35281315de290 2024-12-09T16:36:10,927 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ce1e3777c224423e90e6198ca4bd32e8 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ce1e3777c224423e90e6198ca4bd32e8 2024-12-09T16:36:10,928 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/1dcd1fe286d3461c93a024031a37bf2c to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/1dcd1fe286d3461c93a024031a37bf2c 2024-12-09T16:36:10,929 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ed5932e2296649e3a0d06dd00b33f63e to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/ed5932e2296649e3a0d06dd00b33f63e 2024-12-09T16:36:10,930 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/777a68b65f9f499091d1868cd7860da8 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/777a68b65f9f499091d1868cd7860da8 2024-12-09T16:36:10,931 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/802fcf4c7ece4811af30ed3e1aee5e60 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/802fcf4c7ece4811af30ed3e1aee5e60 2024-12-09T16:36:10,933 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9bf76b563f3d418ea4f66d92d9ffd0c9 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9bf76b563f3d418ea4f66d92d9ffd0c9 2024-12-09T16:36:10,935 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/427cc9d6bcab4f0eb8057a456df1f84f to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/427cc9d6bcab4f0eb8057a456df1f84f 2024-12-09T16:36:10,937 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3c4a8ed201694edf86c2df69e8d96899 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3c4a8ed201694edf86c2df69e8d96899 2024-12-09T16:36:10,938 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9d10981efde646e4b5b18008dcd6c5f5 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9d10981efde646e4b5b18008dcd6c5f5 2024-12-09T16:36:10,939 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/365b06ff738443e0a7995ecb8dd0aaf0 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/365b06ff738443e0a7995ecb8dd0aaf0 2024-12-09T16:36:10,941 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/d3bd3f1be2d34966930e2bdc204d4f07 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/d3bd3f1be2d34966930e2bdc204d4f07 2024-12-09T16:36:10,942 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/da94af8a6ae24b9284a14ccd35e78ba6 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/da94af8a6ae24b9284a14ccd35e78ba6 2024-12-09T16:36:10,944 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9f900077a3cb44e78564b6aa8ff16363 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/9f900077a3cb44e78564b6aa8ff16363 2024-12-09T16:36:10,945 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/b3cbda4500a94afab8a7a3af9b5c0356 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/b3cbda4500a94afab8a7a3af9b5c0356 2024-12-09T16:36:10,947 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/86b35e5d02d74fbb9dab6d6f616bfe6f to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/86b35e5d02d74fbb9dab6d6f616bfe6f 2024-12-09T16:36:10,948 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3af190e92ed047468c57bf694ffed749 to hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/3af190e92ed047468c57bf694ffed749 2024-12-09T16:36:10,950 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/0fc4b4d1c4384856a83721ae367e66b8 to 
hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/archive/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/info/0fc4b4d1c4384856a83721ae367e66b8 2024-12-09T16:36:10,950 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [691cdeab72b142cd87762587a9df0754=28850, 23536f4c0dd042a69187b83c79deb4e8=12509, 0043fb12f6e94ceea43060ab40088782=44066, 3881775cc8944a9d94a35281315de290=12509, ce1e3777c224423e90e6198ca4bd32e8=29784, 1dcd1fe286d3461c93a024031a37bf2c=76649, ed5932e2296649e3a0d06dd00b33f63e=12516, 777a68b65f9f499091d1868cd7860da8=12516, 802fcf4c7ece4811af30ed3e1aee5e60=96159, 9bf76b563f3d418ea4f66d92d9ffd0c9=16828, 427cc9d6bcab4f0eb8057a456df1f84f=22238, 3c4a8ed201694edf86c2df69e8d96899=128737, 9d10981efde646e4b5b18008dcd6c5f5=20078, 365b06ff738443e0a7995ecb8dd0aaf0=22238, d3bd3f1be2d34966930e2bdc204d4f07=153707, da94af8a6ae24b9284a14ccd35e78ba6=12517, 9f900077a3cb44e78564b6aa8ff16363=16839, b3cbda4500a94afab8a7a3af9b5c0356=182950, 86b35e5d02d74fbb9dab6d6f616bfe6f=22254, 3af190e92ed047468c57bf694ffed749=16839, 0fc4b4d1c4384856a83721ae367e66b8=17918] 2024-12-09T16:36:10,954 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/data/default/TestLogRolling-testLogRolling/b21241e15084e886f8038211bba8302b/recovered.edits/336.seqid, newMaxSeqId=336, maxSeqId=107 2024-12-09T16:36:10,954 INFO [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 2024-12-09T16:36:10,954 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b21241e15084e886f8038211bba8302b: Waiting for close lock at 1733762170916Running coprocessor pre-close hooks at 1733762170916Disabling compacts and flushes for region at 1733762170916Disabling writes for close at 1733762170916Writing region close event to WAL at 1733762170950 (+34 ms)Running coprocessor post-close hooks at 1733762170954 (+4 ms)Closed at 1733762170954 2024-12-09T16:36:10,955 DEBUG [RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733762088285.b21241e15084e886f8038211bba8302b. 2024-12-09T16:36:11,098 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(976): stopping server dfce6ce5b10c,39501,1733762062000; all regions closed. 
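The "Wrote file=.../recovered.edits/336.seqid, newMaxSeqId=336" records persist the region's maximum sequence id as a marker file whose name itself carries the value, so a later open can recover it without reading file contents. A small sketch under that assumption:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch: an empty "<seqid>.seqid" marker under recovered.edits,
    // as in the wal.WALSplitUtil lines above.
    final class SeqIdMarkerExample {
        static Path writeMaxSeqId(FileSystem fs, Path regionDir, long maxSeqId) throws IOException {
            Path editsDir = new Path(regionDir, "recovered.edits");
            fs.mkdirs(editsDir);
            Path marker = new Path(editsDir, maxSeqId + ".seqid");
            fs.create(marker, true).close(); // the file name encodes the sequence id
            return marker;
        }
    }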
2024-12-09T16:36:11,098 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,098 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,098 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,099 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,099 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741834_1010 (size=8107) 2024-12-09T16:36:11,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741834_1010 (size=8107) 2024-12-09T16:36:11,103 DEBUG [RS:0;dfce6ce5b10c:39501 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/oldWALs 2024-12-09T16:36:11,103 INFO [RS:0;dfce6ce5b10c:39501 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C39501%2C1733762062000.meta:.meta(num 1733762063113) 2024-12-09T16:36:11,103 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,103 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,103 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,103 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,103 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741878_1054 (size=778) 2024-12-09T16:36:11,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741878_1054 (size=778) 2024-12-09T16:36:11,107 DEBUG [RS:0;dfce6ce5b10c:39501 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/oldWALs 2024-12-09T16:36:11,107 INFO [RS:0;dfce6ce5b10c:39501 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C39501%2C1733762062000:(num 1733762170834) 2024-12-09T16:36:11,107 DEBUG [RS:0;dfce6ce5b10c:39501 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T16:36:11,107 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T16:36:11,107 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T16:36:11,108 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.ChoreService(370): Chore service for: regionserver/dfce6ce5b10c:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T16:36:11,108 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T16:36:11,108 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
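The chore-service shutdown above cancels periodic background tasks (CompactionThroughputTuner, ReplicationSinkStatistics, and so on) as a group. A toy equivalent of that pattern with a plain ScheduledExecutorService (chore name and period borrowed from the log for illustration):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Toy chore service: fixed-period tasks that are interrupted together on shutdown.
    final class ChoreServiceExample {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
            chores.scheduleAtFixedRate(
                () -> System.out.println("CompactionThroughputTuner tick"),
                0, 60_000, TimeUnit.MILLISECONDS);
            Thread.sleep(100);
            chores.shutdownNow(); // interrupts running chores, as on server shutdown
        }
    }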
2024-12-09T16:36:11,108 INFO [RS:0;dfce6ce5b10c:39501 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39501 2024-12-09T16:36:11,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/dfce6ce5b10c,39501,1733762062000 2024-12-09T16:36:11,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T16:36:11,115 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T16:36:11,123 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [dfce6ce5b10c,39501,1733762062000] 2024-12-09T16:36:11,131 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/dfce6ce5b10c,39501,1733762062000 already deleted, retry=false 2024-12-09T16:36:11,131 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; dfce6ce5b10c,39501,1733762062000 expired; onlineServers=0 2024-12-09T16:36:11,131 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'dfce6ce5b10c,45079,1733762061826' ***** 2024-12-09T16:36:11,131 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T16:36:11,131 INFO [M:0;dfce6ce5b10c:45079 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T16:36:11,132 INFO [M:0;dfce6ce5b10c:45079 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T16:36:11,132 DEBUG [M:0;dfce6ce5b10c:45079 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T16:36:11,132 DEBUG [M:0;dfce6ce5b10c:45079 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T16:36:11,132 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
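The NodeDeleted event for /hbase/rs/dfce6ce5b10c,39501,... above is the liveness mechanism at work: each region server registers an ephemeral znode tied to its ZooKeeper session, and when the session closes the znode vanishes, letting the master's RegionServerTracker begin expiration. A sketch with the plain ZooKeeper client API (paths follow the log; connection setup and error handling elided):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch of ephemeral-znode liveness: registration disappears with the session.
    final class EphemeralLivenessExample {
        static void register(ZooKeeper zk, String serverName) throws Exception {
            // EPHEMERAL: deleted automatically when this session closes or expires.
            zk.create("/hbase/rs/" + serverName, new byte[0],
                ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        }

        static void watchServers(ZooKeeper zk) throws Exception {
            // A child watch on /hbase/rs fires NodeChildrenChanged, as seen above.
            zk.getChildren("/hbase/rs", event ->
                System.out.println("Received ZooKeeper Event: " + event));
        }
    }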
2024-12-09T16:36:11,132 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733762062435 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733762062435,5,FailOnTimeoutGroup] 2024-12-09T16:36:11,132 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733762062434 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733762062434,5,FailOnTimeoutGroup] 2024-12-09T16:36:11,132 INFO [M:0;dfce6ce5b10c:45079 {}] hbase.ChoreService(370): Chore service for: master/dfce6ce5b10c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T16:36:11,132 INFO [M:0;dfce6ce5b10c:45079 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T16:36:11,132 DEBUG [M:0;dfce6ce5b10c:45079 {}] master.HMaster(1795): Stopping service threads 2024-12-09T16:36:11,132 INFO [M:0;dfce6ce5b10c:45079 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T16:36:11,132 INFO [M:0;dfce6ce5b10c:45079 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T16:36:11,132 INFO [M:0;dfce6ce5b10c:45079 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T16:36:11,132 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T16:36:11,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T16:36:11,140 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:36:11,140 DEBUG [M:0;dfce6ce5b10c:45079 {}] zookeeper.ZKUtil(347): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T16:36:11,140 WARN [M:0;dfce6ce5b10c:45079 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T16:36:11,140 INFO [M:0;dfce6ce5b10c:45079 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/.lastflushedseqids 2024-12-09T16:36:11,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741879_1055 (size=228) 2024-12-09T16:36:11,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741879_1055 (size=228) 2024-12-09T16:36:11,150 INFO [M:0;dfce6ce5b10c:45079 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T16:36:11,150 INFO [M:0;dfce6ce5b10c:45079 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T16:36:11,150 DEBUG [M:0;dfce6ce5b10c:45079 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T16:36:11,150 INFO [M:0;dfce6ce5b10c:45079 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:36:11,150 DEBUG [M:0;dfce6ce5b10c:45079 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:36:11,150 DEBUG [M:0;dfce6ce5b10c:45079 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T16:36:11,150 DEBUG [M:0;dfce6ce5b10c:45079 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:36:11,150 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.41 KB heapSize=63.33 KB 2024-12-09T16:36:11,164 DEBUG [M:0;dfce6ce5b10c:45079 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6a972a5c39c14d569b5550d1fa264e49 is 82, key is hbase:meta,,1/info:regioninfo/1733762063142/Put/seqid=0 2024-12-09T16:36:11,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741880_1056 (size=5672) 2024-12-09T16:36:11,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741880_1056 (size=5672) 2024-12-09T16:36:11,169 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6a972a5c39c14d569b5550d1fa264e49 2024-12-09T16:36:11,192 DEBUG [M:0;dfce6ce5b10c:45079 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3531feb8f35e44359a420f7cb65abde7 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733762063703/Put/seqid=0 2024-12-09T16:36:11,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741881_1057 (size=7089) 2024-12-09T16:36:11,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741881_1057 (size=7089) 2024-12-09T16:36:11,197 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.80 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3531feb8f35e44359a420f7cb65abde7 2024-12-09T16:36:11,201 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3531feb8f35e44359a420f7cb65abde7 2024-12-09T16:36:11,215 DEBUG [M:0;dfce6ce5b10c:45079 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5818cd5267e94b9194c81f9c53f25cb3 is 69, key is dfce6ce5b10c,39501,1733762062000/rs:state/1733762062514/Put/seqid=0 
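The master's local store flush above writes each column family's memstore snapshot to a file under .tmp/ and, a few records later, commits it by rename ("Committing .tmp/... as ..."), so readers never observe a half-written file. A simplified sketch of that write-temp-then-rename protocol using java.nio, as an illustration of the pattern rather than HBase's HFile writer:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;
    import java.util.SortedMap;

    // Sketch: flush a sorted snapshot to .tmp/<family>/, then commit via atomic rename.
    final class FlushSketch {
        static Path flushToTmp(Path storeDir, String family, SortedMap<String, String> snapshot)
                throws IOException {
            Path tmp = storeDir.resolve(".tmp").resolve(family);
            Files.createDirectories(tmp);
            Path out = tmp.resolve("flush-" + System.nanoTime());
            StringBuilder sb = new StringBuilder();
            snapshot.forEach((k, v) -> sb.append(k).append('=').append(v).append('\n'));
            return Files.write(out, sb.toString().getBytes(StandardCharsets.UTF_8));
        }

        static Path commit(Path flushedTmpFile, Path storeDir, String family) throws IOException {
            Path target = storeDir.resolve(family).resolve(flushedTmpFile.getFileName());
            Files.createDirectories(target.getParent());
            // The "Committing .tmp/... as .../<family>/..." records are this rename step.
            return Files.move(flushedTmpFile, target, StandardCopyOption.ATOMIC_MOVE);
        }
    }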
2024-12-09T16:36:11,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741882_1058 (size=5156)
2024-12-09T16:36:11,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741882_1058 (size=5156)
2024-12-09T16:36:11,221 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5818cd5267e94b9194c81f9c53f25cb3
2024-12-09T16:36:11,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:36:11,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:36:11,223 INFO [RS:0;dfce6ce5b10c:39501 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:36:11,223 INFO [RS:0;dfce6ce5b10c:39501 {}] regionserver.HRegionServer(1031): Exiting; stopping=dfce6ce5b10c,39501,1733762062000; zookeeper connection closed.
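The "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warning above comes from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection, so the real cause must be unwrapped from the reflection wrapper. A sketch of that probe pattern:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch: isFileClosed() is not on the base FileSystem class, so it is looked up
    // reflectively; the underlying failure arrives wrapped in InvocationTargetException.
    final class IsFileClosedProbe {
        static Boolean isFileClosed(FileSystem fs, Path path) {
            try {
                Method m = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) m.invoke(fs, path);
            } catch (NoSuchMethodException e) {
                return null; // this filesystem does not support the probe
            } catch (InvocationTargetException e) {
                // getCause() carries the real error, e.g. "Filesystem closed" above.
                System.err.println("Failed invocation: " + e.getCause());
                return null;
            } catch (IllegalAccessException e) {
                return null;
            }
        }
    }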
2024-12-09T16:36:11,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39501-0x100bfc794870001, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T16:36:11,223 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@714bf023 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@714bf023 2024-12-09T16:36:11,223 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T16:36:11,240 DEBUG [M:0;dfce6ce5b10c:45079 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a718cdec548c41a789a159f2fa30905a is 52, key is load_balancer_on/state:d/1733762063275/Put/seqid=0 2024-12-09T16:36:11,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741883_1059 (size=5056) 2024-12-09T16:36:11,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741883_1059 (size=5056) 2024-12-09T16:36:11,246 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a718cdec548c41a789a159f2fa30905a 2024-12-09T16:36:11,253 DEBUG [M:0;dfce6ce5b10c:45079 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6a972a5c39c14d569b5550d1fa264e49 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6a972a5c39c14d569b5550d1fa264e49 2024-12-09T16:36:11,257 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6a972a5c39c14d569b5550d1fa264e49, entries=8, sequenceid=125, filesize=5.5 K 2024-12-09T16:36:11,258 DEBUG [M:0;dfce6ce5b10c:45079 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/3531feb8f35e44359a420f7cb65abde7 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3531feb8f35e44359a420f7cb65abde7 2024-12-09T16:36:11,262 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 3531feb8f35e44359a420f7cb65abde7 2024-12-09T16:36:11,262 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/3531feb8f35e44359a420f7cb65abde7, entries=13, sequenceid=125, filesize=6.9 K 2024-12-09T16:36:11,263 DEBUG [M:0;dfce6ce5b10c:45079 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5818cd5267e94b9194c81f9c53f25cb3 as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5818cd5267e94b9194c81f9c53f25cb3 2024-12-09T16:36:11,267 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5818cd5267e94b9194c81f9c53f25cb3, entries=1, sequenceid=125, filesize=5.0 K 2024-12-09T16:36:11,268 DEBUG [M:0;dfce6ce5b10c:45079 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a718cdec548c41a789a159f2fa30905a as hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a718cdec548c41a789a159f2fa30905a 2024-12-09T16:36:11,272 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45027/user/jenkins/test-data/bc009dec-d401-e91d-68c4-2b3ddc286afa/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a718cdec548c41a789a159f2fa30905a, entries=1, sequenceid=125, filesize=4.9 K 2024-12-09T16:36:11,273 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=125, compaction requested=false 2024-12-09T16:36:11,274 INFO [M:0;dfce6ce5b10c:45079 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T16:36:11,274 DEBUG [M:0;dfce6ce5b10c:45079 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733762171150Disabling compacts and flushes for region at 1733762171150Disabling writes for close at 1733762171150Obtaining lock to block concurrent updates at 1733762171150Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733762171150Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52639, getHeapSize=64784, getOffHeapSize=0, getCellsCount=148 at 1733762171151 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733762171151Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733762171151Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733762171164 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733762171164Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733762171174 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733762171192 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733762171192Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733762171201 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733762171215 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733762171215Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733762171225 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733762171240 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733762171240Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@351651e0: reopening flushed file at 1733762171252 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41d3eb2: reopening flushed file at 1733762171257 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ab46fd2: reopening flushed file at 1733762171263 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21d9bd30: reopening flushed file at 1733762171267 (+4 ms)Finished flush of dataSize ~51.41 KB/52639, heapSize ~63.27 KB/64784, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 123ms, sequenceid=125, compaction requested=false at 1733762171273 (+6 ms)Writing region close event to WAL at 1733762171274 (+1 ms)Closed at 1733762171274 2024-12-09T16:36:11,275 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,275 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,275 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,275 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,275 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T16:36:11,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38673 is added to blk_1073741830_1006 (size=61308) 2024-12-09T16:36:11,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37067 is added to blk_1073741830_1006 (size=61308) 2024-12-09T16:36:11,277 INFO [M:0;dfce6ce5b10c:45079 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T16:36:11,277 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
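The close journal above is rendered as a single run of step names with absolute timestamps and "(+N ms)" deltas between consecutive steps. A toy journal that produces the same shape, for readers decoding those lines:

    import java.util.ArrayList;
    import java.util.List;

    // Toy version of the region close journal: record a timestamp per step and
    // render per-step deltas like "(+13 ms)" only when time actually elapsed.
    final class CloseJournal {
        private final List<String> steps = new ArrayList<>();
        private final List<Long> times = new ArrayList<>();

        void record(String step) {
            steps.add(step);
            times.add(System.currentTimeMillis());
        }

        String render() {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < steps.size(); i++) {
                sb.append(steps.get(i)).append(" at ").append(times.get(i));
                if (i > 0) {
                    long delta = times.get(i) - times.get(i - 1);
                    if (delta > 0) sb.append(" (+").append(delta).append(" ms)");
                }
            }
            return sb.toString();
        }
    }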
2024-12-09T16:36:11,277 INFO [M:0;dfce6ce5b10c:45079 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45079 2024-12-09T16:36:11,277 INFO [M:0;dfce6ce5b10c:45079 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T16:36:11,387 INFO [M:0;dfce6ce5b10c:45079 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T16:36:11,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T16:36:11,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45079-0x100bfc794870000, quorum=127.0.0.1:63832, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T16:36:11,418 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ef289a6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:36:11,419 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a8a455f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T16:36:11,419 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T16:36:11,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cd15cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T16:36:11,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c6d9015{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/hadoop.log.dir/,STOPPED} 2024-12-09T16:36:11,420 WARN [BP-1718649844-172.17.0.2-1733762059663 heartbeating to localhost/127.0.0.1:45027 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T16:36:11,420 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T16:36:11,420 WARN [BP-1718649844-172.17.0.2-1733762059663 heartbeating to localhost/127.0.0.1:45027 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1718649844-172.17.0.2-1733762059663 (Datanode Uuid 1f9d6c93-225b-4ed8-81d1-6a60f6a11598) service to localhost/127.0.0.1:45027 2024-12-09T16:36:11,420 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T16:36:11,421 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94/data/data3/current/BP-1718649844-172.17.0.2-1733762059663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:36:11,421 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94/data/data4/current/BP-1718649844-172.17.0.2-1733762059663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:36:11,421 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T16:36:11,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a78718c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:36:11,425 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@693d3430{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T16:36:11,425 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T16:36:11,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45abd465{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T16:36:11,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10eef1f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/hadoop.log.dir/,STOPPED} 2024-12-09T16:36:11,426 WARN [BP-1718649844-172.17.0.2-1733762059663 heartbeating to localhost/127.0.0.1:45027 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T16:36:11,426 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T16:36:11,426 WARN [BP-1718649844-172.17.0.2-1733762059663 heartbeating to localhost/127.0.0.1:45027 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1718649844-172.17.0.2-1733762059663 (Datanode Uuid 6ddcb14b-9f49-457a-bd9a-f5a1245b178c) service to localhost/127.0.0.1:45027 2024-12-09T16:36:11,426 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T16:36:11,426 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94/data/data1/current/BP-1718649844-172.17.0.2-1733762059663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:36:11,427 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/cluster_15a9bf8b-0915-b640-c1aa-6a829e065c94/data/data2/current/BP-1718649844-172.17.0.2-1733762059663 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T16:36:11,427 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T16:36:11,432 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e1d4a93{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T16:36:11,432 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b216a06{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T16:36:11,433 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T16:36:11,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50c11e4d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T16:36:11,433 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37314cce{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/hadoop.log.dir/,STOPPED} 2024-12-09T16:36:11,439 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T16:36:11,465 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T16:36:11,476 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 208) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45027 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/dfce6ce5b10c:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:45027 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45027 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45027 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45027 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45027 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=509 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=444 (was 524), ProcessCount=11 (was 11), AvailableMemoryMB=7022 (was 6611) - AvailableMemoryMB LEAK? 
- 2024-12-09T16:36:11,483 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=444, ProcessCount=11, AvailableMemoryMB=7023 2024-12-09T16:36:11,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T16:36:11,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/hadoop.log.dir so I do NOT create it in target/test-data/73290fae-8369-1942-f14c-82156814674d 2024-12-09T16:36:11,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/84ffbd53-a9df-ca1e-39ff-d847e1eb86d4/hadoop.tmp.dir so I do NOT create it in target/test-data/73290fae-8369-1942-f14c-82156814674d 2024-12-09T16:36:11,484 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d, deleteOnExit=true 2024-12-09T16:36:11,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T16:36:11,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/test.cache.data in system properties and HBase conf 2024-12-09T16:36:11,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T16:36:11,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/hadoop.log.dir in system properties and HBase conf 2024-12-09T16:36:11,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T16:36:11,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T16:36:11,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T16:36:11,484 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/nfs.dump.dir in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/java.io.tmpdir in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T16:36:11,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T16:36:11,496 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T16:36:11,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:36:11,655 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:36:11,799 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:36:11,801 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:36:11,803 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:36:11,803 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:36:11,803 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T16:36:11,803 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:36:11,806 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13195768{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:36:11,806 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@63033be1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:36:11,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75cb2718{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/java.io.tmpdir/jetty-localhost-38779-hadoop-hdfs-3_4_1-tests_jar-_-any-13787042027949889420/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T16:36:11,897 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66393fdc{HTTP/1.1, (http/1.1)}{localhost:38779} 2024-12-09T16:36:11,897 INFO [Time-limited test {}] server.Server(415): Started @371063ms 2024-12-09T16:36:11,908 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T16:36:12,071 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:36:12,074 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:36:12,074 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:36:12,074 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:36:12,074 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T16:36:12,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@241be264{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:36:12,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39601996{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:36:12,165 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@50b9a266{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/java.io.tmpdir/jetty-localhost-41535-hadoop-hdfs-3_4_1-tests_jar-_-any-9808065562106767763/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:36:12,165 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@492ada86{HTTP/1.1, (http/1.1)}{localhost:41535} 2024-12-09T16:36:12,165 INFO [Time-limited test {}] server.Server(415): Started @371331ms 2024-12-09T16:36:12,166 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T16:36:12,196 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T16:36:12,198 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T16:36:12,199 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T16:36:12,199 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T16:36:12,199 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T16:36:12,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f055e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/hadoop.log.dir/,AVAILABLE} 2024-12-09T16:36:12,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60e21a06{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T16:36:12,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:36:12,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@697b95ad{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/java.io.tmpdir/jetty-localhost-46695-hadoop-hdfs-3_4_1-tests_jar-_-any-1217129820066414997/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T16:36:12,293 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7cf07d34{HTTP/1.1, (http/1.1)}{localhost:46695} 2024-12-09T16:36:12,293 INFO [Time-limited test {}] server.Server(415): Started @371459ms 2024-12-09T16:36:12,294 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T16:36:12,574 INFO [regionserver/dfce6ce5b10c:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T16:36:12,596 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-09T16:36:12,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:36:12,969 WARN [Thread-2497 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d/data/data1/current/BP-2138638399-172.17.0.2-1733762171499/current, will proceed with Du for space computation calculation,
2024-12-09T16:36:12,969 WARN [Thread-2498 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d/data/data2/current/BP-2138638399-172.17.0.2-1733762171499/current, will proceed with Du for space computation calculation,
2024-12-09T16:36:12,985 WARN [Thread-2461 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:36:12,987 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1db41155b816f5d9 with lease ID 0x5e23962ae4e743f7: Processing first storage report for DS-b8734b11-ee83-4f09-9ca6-6be14c9ebaea from datanode DatanodeRegistration(127.0.0.1:40915, datanodeUuid=cbaaf58d-9b20-41b1-8d3d-4c3f68c2f3ba, infoPort=46211, infoSecurePort=0, ipcPort=40221, storageInfo=lv=-57;cid=testClusterID;nsid=1652172691;c=1733762171499)
2024-12-09T16:36:12,987 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1db41155b816f5d9 with lease ID 0x5e23962ae4e743f7: from storage DS-b8734b11-ee83-4f09-9ca6-6be14c9ebaea node DatanodeRegistration(127.0.0.1:40915, datanodeUuid=cbaaf58d-9b20-41b1-8d3d-4c3f68c2f3ba, infoPort=46211, infoSecurePort=0, ipcPort=40221, storageInfo=lv=-57;cid=testClusterID;nsid=1652172691;c=1733762171499), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:36:12,987 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1db41155b816f5d9 with lease ID 0x5e23962ae4e743f7: Processing first storage report for DS-b3bdbf1a-07ac-4c9e-a5d3-1f7cb3d61f14 from datanode DatanodeRegistration(127.0.0.1:40915, datanodeUuid=cbaaf58d-9b20-41b1-8d3d-4c3f68c2f3ba, infoPort=46211, infoSecurePort=0, ipcPort=40221, storageInfo=lv=-57;cid=testClusterID;nsid=1652172691;c=1733762171499)
2024-12-09T16:36:12,987 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1db41155b816f5d9 with lease ID 0x5e23962ae4e743f7: from storage DS-b3bdbf1a-07ac-4c9e-a5d3-1f7cb3d61f14 node DatanodeRegistration(127.0.0.1:40915, datanodeUuid=cbaaf58d-9b20-41b1-8d3d-4c3f68c2f3ba, infoPort=46211, infoSecurePort=0, ipcPort=40221, storageInfo=lv=-57;cid=testClusterID;nsid=1652172691;c=1733762171499), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:36:13,111 WARN [Thread-2509 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d/data/data4/current/BP-2138638399-172.17.0.2-1733762171499/current, will proceed with Du for space computation calculation,
2024-12-09T16:36:13,111 WARN [Thread-2508 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d/data/data3/current/BP-2138638399-172.17.0.2-1733762171499/current, will proceed with Du for space computation calculation,
2024-12-09T16:36:13,132 WARN [Thread-2484 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T16:36:13,134 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ef6ddcedfac4d0f with lease ID 0x5e23962ae4e743f8: Processing first storage report for DS-e976ffa8-caf2-4f62-83c3-99528cdd55ea from datanode DatanodeRegistration(127.0.0.1:45529, datanodeUuid=3896793c-9373-4cfd-aab4-9470a2d8fcd6, infoPort=41459, infoSecurePort=0, ipcPort=45817, storageInfo=lv=-57;cid=testClusterID;nsid=1652172691;c=1733762171499)
2024-12-09T16:36:13,135 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ef6ddcedfac4d0f with lease ID 0x5e23962ae4e743f8: from storage DS-e976ffa8-caf2-4f62-83c3-99528cdd55ea node DatanodeRegistration(127.0.0.1:45529, datanodeUuid=3896793c-9373-4cfd-aab4-9470a2d8fcd6, infoPort=41459, infoSecurePort=0, ipcPort=45817, storageInfo=lv=-57;cid=testClusterID;nsid=1652172691;c=1733762171499), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T16:36:13,135 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ef6ddcedfac4d0f with lease ID 0x5e23962ae4e743f8: Processing first storage report for DS-73deca2e-08e0-4a4f-83a2-5c1321409193 from datanode DatanodeRegistration(127.0.0.1:45529, datanodeUuid=3896793c-9373-4cfd-aab4-9470a2d8fcd6, infoPort=41459, infoSecurePort=0, ipcPort=45817, storageInfo=lv=-57;cid=testClusterID;nsid=1652172691;c=1733762171499)
2024-12-09T16:36:13,135 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ef6ddcedfac4d0f with lease ID 0x5e23962ae4e743f8: from storage DS-73deca2e-08e0-4a4f-83a2-5c1321409193 node DatanodeRegistration(127.0.0.1:45529, datanodeUuid=3896793c-9373-4cfd-aab4-9470a2d8fcd6, infoPort=41459, infoSecurePort=0, ipcPort=45817, storageInfo=lv=-57;cid=testClusterID;nsid=1652172691;c=1733762171499), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T16:36:13,221 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d
2024-12-09T16:36:13,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:36:13,224 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d/zookeeper_0, clientPort=52051, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T16:36:13,225 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52051
2024-12-09T16:36:13,225 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:36:13,227 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:36:13,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741825_1001 (size=7)
2024-12-09T16:36:13,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741825_1001 (size=7)
2024-12-09T16:36:13,236 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00 with version=8
2024-12-09T16:36:13,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38839/user/jenkins/test-data/3d8ffbbf-8c63-4765-ae7b-0ef110325d9f/hbase-staging
2024-12-09T16:36:13,238 INFO [Time-limited test {}] client.ConnectionUtils(128): master/dfce6ce5b10c:0 server-side Connection retries=45
2024-12-09T16:36:13,238 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
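The "Failed invocation ... InvocationTargetException: null" warnings above come from a reflective call path (GeneratedMethodAccessor -> Method.invoke -> isFileClosed), which is why the wrapper exception carries a null message and the real "Filesystem closed" IOException only appears as the cause. A minimal sketch of that pattern, assuming only public JDK and Hadoop APIs; this is an illustration, not HBase's actual RecoverLeaseFSUtils code:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical illustration of a reflective isFileClosed probe.
public final class IsFileClosedProbe {
    public static boolean isFileClosed(FileSystem fs, Path path) {
        try {
            // The method is looked up reflectively so FileSystem implementations
            // that lack isFileClosed can be tolerated at runtime.
            Method m = fs.getClass().getMethod("isFileClosed", Path.class);
            return (Boolean) m.invoke(fs, path);
        } catch (InvocationTargetException e) {
            // Method.invoke wraps whatever the target threw; the wrapper's own
            // message is null, and the interesting part is the cause, e.g. the
            // "Filesystem closed" IOException when the DFSClient is already shut down.
            System.err.println("Failed invocation for " + path + ": " + e.getCause());
            return false;
        } catch (ReflectiveOperationException e) {
            return false; // no such method on this FileSystem implementation
        }
    }
}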
2024-12-09T16:36:13,238 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T16:36:13,238 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T16:36:13,238 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:36:13,238 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T16:36:13,238 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-09T16:36:13,239 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T16:36:13,239 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39251
2024-12-09T16:36:13,240 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39251 connecting to ZooKeeper ensemble=127.0.0.1:52051
2024-12-09T16:36:13,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:392510x0, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T16:36:13,295 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39251-0x100bfc947e10000 connected
2024-12-09T16:36:13,362 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:36:13,364 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:36:13,366 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:36:13,366 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00, hbase.cluster.distributed=false
2024-12-09T16:36:13,367 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T16:36:13,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39251
2024-12-09T16:36:13,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39251
2024-12-09T16:36:13,368 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39251
2024-12-09T16:36:13,369 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39251
2024-12-09T16:36:13,369 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39251
2024-12-09T16:36:13,388 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/dfce6ce5b10c:0 server-side Connection retries=45
2024-12-09T16:36:13,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:36:13,388 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T16:36:13,389 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T16:36:13,389 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T16:36:13,389 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T16:36:13,389 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-09T16:36:13,389 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T16:36:13,389 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34735
2024-12-09T16:36:13,391 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34735 connecting to ZooKeeper ensemble=127.0.0.1:52051
2024-12-09T16:36:13,391 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:36:13,393 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:36:13,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:347350x0, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T16:36:13,404 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34735-0x100bfc947e10001 connected
2024-12-09T16:36:13,404 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:36:13,404 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-09T16:36:13,404 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-09T16:36:13,405 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-09T16:36:13,406 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T16:36:13,406 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34735
2024-12-09T16:36:13,406 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34735
2024-12-09T16:36:13,406 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34735
2024-12-09T16:36:13,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34735
2024-12-09T16:36:13,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34735
2024-12-09T16:36:13,416 DEBUG [M:0;dfce6ce5b10c:39251 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;dfce6ce5b10c:39251
2024-12-09T16:36:13,417 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/dfce6ce5b10c,39251,1733762173238
2024-12-09T16:36:13,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:36:13,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:36:13,420 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/dfce6ce5b10c,39251,1733762173238
2024-12-09T16:36:13,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:13,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-09T16:36:13,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:13,429 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-09T16:36:13,429 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/dfce6ce5b10c,39251,1733762173238 from backup master directory
2024-12-09T16:36:13,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/dfce6ce5b10c,39251,1733762173238
2024-12-09T16:36:13,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:36:13,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-09T16:36:13,437 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-09T16:36:13,437 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=dfce6ce5b10c,39251,1733762173238
2024-12-09T16:36:13,441 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/hbase.id] with ID: 00e126d3-d675-4c36-812a-5336350a3ce8
2024-12-09T16:36:13,441 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/.tmp/hbase.id
2024-12-09T16:36:13,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741826_1002 (size=42)
2024-12-09T16:36:13,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741826_1002 (size=42)
2024-12-09T16:36:13,446 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/.tmp/hbase.id]:[hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/hbase.id]
2024-12-09T16:36:13,456 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:36:13,456 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-12-09T16:36:13,457 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms.
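The repeated "Set watcher on znode that does not yet exist" lines above reflect a standard ZooKeeper idiom: exists() registers a watch even when the path is absent, so the client is later notified with a NodeCreated event (visible further down for /hbase/running). A minimal sketch using the plain ZooKeeper client; the connect string mirrors the log, everything else is illustrative rather than HBase's ZKWatcher/ZKUtil code:

import java.io.IOException;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only; HBase wraps this pattern in ZKWatcher/ZKUtil.
public final class ExistsWatchDemo {
    public static void main(String[] args) throws IOException, KeeperException, InterruptedException {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:52051", 30_000, event -> {});
        // exists() returns null for an absent znode but still registers the watch,
        // which fires once with type=NodeCreated when the znode is created.
        zk.exists("/hbase/running", event ->
            System.out.println("event on /hbase/running: " + event.getType()));
    }
}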
2024-12-09T16:36:13,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:13,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:13,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741827_1003 (size=196)
2024-12-09T16:36:13,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741827_1003 (size=196)
2024-12-09T16:36:13,469 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-09T16:36:13,470 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-09T16:36:13,470 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:36:13,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741828_1004 (size=1189)
2024-12-09T16:36:13,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741828_1004 (size=1189)
2024-12-09T16:36:13,479 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store
2024-12-09T16:36:13,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741829_1005 (size=34)
2024-12-09T16:36:13,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741829_1005 (size=34)
2024-12-09T16:36:13,485 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:36:13,485 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T16:36:13,485 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:36:13,485 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:36:13,485 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T16:36:13,485 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:36:13,485 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
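For readers unfamiliar with the {NAME => ...} dumps above: each brace group is one column family of the 'master:store' descriptor. A hedged sketch of how an equivalent family could be declared through the public HBase client API (only the 'info' family shown, with values copied from the log and everything else left default); the master local region is actually built internally, not via client DDL:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: mirrors the logged 'info' family settings of master:store.
public final class MasterStoreDescriptorSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
                .build())
            .build();
    }
}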
2024-12-09T16:36:13,485 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733762173485Disabling compacts and flushes for region at 1733762173485Disabling writes for close at 1733762173485Writing region close event to WAL at 1733762173485Closed at 1733762173485
2024-12-09T16:36:13,485 WARN [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/.initializing
2024-12-09T16:36:13,485 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/WALs/dfce6ce5b10c,39251,1733762173238
2024-12-09T16:36:13,487 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C39251%2C1733762173238, suffix=, logDir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/WALs/dfce6ce5b10c,39251,1733762173238, archiveDir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/oldWALs, maxLogs=10
2024-12-09T16:36:13,488 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C39251%2C1733762173238.1733762173487
2024-12-09T16:36:13,492 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/WALs/dfce6ce5b10c,39251,1733762173238/dfce6ce5b10c%2C39251%2C1733762173238.1733762173487
2024-12-09T16:36:13,493 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41459:41459),(127.0.0.1/127.0.0.1:46211:46211)]
2024-12-09T16:36:13,493 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-09T16:36:13,494 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-09T16:36:13,494 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,494 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,495 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-09T16:36:13,496 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:36:13,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-09T16:36:13,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,497 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-09T16:36:13,497 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:36:13,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T16:36:13,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,499 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-09T16:36:13,499 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:36:13,499 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T16:36:13,499 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,500 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-09T16:36:13,500 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:36:13,500 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-09T16:36:13,500 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,501 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,501 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,502 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,502 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,502 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-09T16:36:13,504 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-09T16:36:13,505 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-09T16:36:13,506 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769905, jitterRate=-0.021015524864196777}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-09T16:36:13,506 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733762173494Initializing all the Stores at 1733762173494Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762173494Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762173495 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762173495Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762173495Cleaning up temporary data from old regions at 1733762173502 (+7 ms)Region opened successfully at 1733762173506 (+4 ms)
2024-12-09T16:36:13,507 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-09T16:36:13,510 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c109b52, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0
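The "Opened 1595... ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769905, jitterRate=-0.021015524864196777}" line above shows a jittered split threshold: a fixed base size perturbed once by a small random rate so that regions on different servers do not all split at the same instant. A small arithmetic sketch of how such a value could arise; the 786432-byte base below is an assumption chosen because it reproduces the logged number, not a value taken from the log:

// Hypothetical arithmetic sketch; the base size is assumed, not logged.
public final class SplitSizeJitterSketch {
    public static void main(String[] args) {
        long base = 786_432L;                      // assumed pre-jitter size (0.75 MiB)
        double jitterRate = -0.021015524864196777; // from the log line above
        long desiredMaxFileSize = base + (long) (base * jitterRate);
        System.out.println(desiredMaxFileSize);    // prints 769905
    }
}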
2024-12-09T16:36:13,511 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-12-09T16:36:13,511 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-12-09T16:36:13,511 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-12-09T16:36:13,511 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-12-09T16:36:13,512 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-12-09T16:36:13,512 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-12-09T16:36:13,512 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-12-09T16:36:13,514 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-12-09T16:36:13,515 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-12-09T16:36:13,520 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-12-09T16:36:13,521 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-12-09T16:36:13,521 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-12-09T16:36:13,528 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-12-09T16:36:13,529 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-12-09T16:36:13,530 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-12-09T16:36:13,537 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-12-09T16:36:13,538 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-12-09T16:36:13,545 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-12-09T16:36:13,547 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-12-09T16:36:13,553 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-12-09T16:36:13,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-09T16:36:13,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:13,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-12-09T16:36:13,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:13,562 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=dfce6ce5b10c,39251,1733762173238, sessionid=0x100bfc947e10000, setting cluster-up flag (Was=false)
2024-12-09T16:36:13,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:13,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:13,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:36:13,603 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-12-09T16:36:13,604 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,39251,1733762173238
2024-12-09T16:36:13,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:13,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:13,645 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-12-09T16:36:13,646 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=dfce6ce5b10c,39251,1733762173238
2024-12-09T16:36:13,647 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-12-09T16:36:13,649 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-12-09T16:36:13,649 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-12-09T16:36:13,649 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-12-09T16:36:13,649 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: dfce6ce5b10c,39251,1733762173238 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-12-09T16:36:13,651 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5
2024-12-09T16:36:13,651 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5
2024-12-09T16:36:13,651 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5
2024-12-09T16:36:13,651 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=5, maxPoolSize=5
2024-12-09T16:36:13,651 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/dfce6ce5b10c:0, corePoolSize=10, maxPoolSize=10
2024-12-09T16:36:13,651 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:36:13,651 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2
2024-12-09T16:36:13,651 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1
2024-12-09T16:36:13,652 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-12-09T16:36:13,653 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-12-09T16:36:13,653 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733762203653
2024-12-09T16:36:13,653 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-12-09T16:36:13,653 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-12-09T16:36:13,653 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-12-09T16:36:13,653 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
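The StochasticLoadBalancer entry above lists the cost functions it loaded ("sum of multiplier of cost functions = 0.0" simply reflects an empty cluster at this point in the test). A JDK-only sketch of the weighted-cost idea with placeholder names, weights, and costs; this is an illustration of the technique, not the balancer's actual code:

import java.util.Map;

// Placeholder numbers: each cost function scores a candidate cluster state in [0,1]
// and a per-function multiplier sets its weight in the blended total.
public final class WeightedCostSketch {
    public static void main(String[] args) {
        Map<String, double[]> costs = Map.of(            // {multiplier, cost}
            "RegionCountSkewCostFunction", new double[] {500.0, 0.12},
            "MoveCostFunction",            new double[] {7.0, 0.40},
            "StoreFileCostFunction",       new double[] {5.0, 0.25});
        double weighted = costs.values().stream().mapToDouble(mc -> mc[0] * mc[1]).sum();
        double multipliers = costs.values().stream().mapToDouble(mc -> mc[0]).sum();
        System.out.println(weighted / multipliers);      // lower is better for a candidate plan
    }
}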
2024-12-09T16:36:13,653 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-12-09T16:36:13,653 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-12-09T16:36:13,653 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-09T16:36:13,654 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-12-09T16:36:13,654 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-12-09T16:36:13,655 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T16:36:13,655 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T16:36:13,655 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T16:36:13,655 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T16:36:13,655 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T16:36:13,656 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733762173655,5,FailOnTimeoutGroup] 2024-12-09T16:36:13,656 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733762173656,5,FailOnTimeoutGroup] 2024-12-09T16:36:13,656 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:13,656 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T16:36:13,656 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:13,656 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:13,656 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T16:36:13,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741831_1007 (size=1321) 2024-12-09T16:36:13,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741831_1007 (size=1321) 2024-12-09T16:36:13,662 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T16:36:13,662 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00 2024-12-09T16:36:13,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741832_1008 (size=32) 
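
The Close-WAL-Writer WARN above fails because RecoverLeaseFSUtils probes DistributedFileSystem.isFileClosed reflectively, and the underlying DFSClient has already been closed, so the probe surfaces as an InvocationTargetException wrapping "java.io.IOException: Filesystem closed". A stripped-down sketch of that call pattern (illustrative only; the real logic lives in RecoverLeaseFSUtils.isFileClosed):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Returns true if the NameNode reports the file closed; false if the probe
    // itself failed (e.g. "Filesystem closed", as in the WARN above).
    static boolean probeFileClosed(FileSystem fs, Path p) {
      try {
        // isFileClosed exists only on DistributedFileSystem, hence the reflection.
        Method m = fs.getClass().getMethod("isFileClosed", Path.class);
        return (Boolean) m.invoke(fs, p);
      } catch (NoSuchMethodException | IllegalAccessException e) {
        return false; // not HDFS, or method unavailable
      } catch (InvocationTargetException e) {
        // e.getCause() carries the real failure, e.g. java.io.IOException: Filesystem closed.
        return false;
      }
    }
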
2024-12-09T16:36:13,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741832_1008 (size=32) 2024-12-09T16:36:13,709 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(746): ClusterId : 00e126d3-d675-4c36-812a-5336350a3ce8 2024-12-09T16:36:13,709 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T16:36:13,712 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T16:36:13,712 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T16:36:13,721 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T16:36:13,721 DEBUG [RS:0;dfce6ce5b10c:34735 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b477af3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=dfce6ce5b10c/172.17.0.2:0 2024-12-09T16:36:13,733 DEBUG [RS:0;dfce6ce5b10c:34735 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;dfce6ce5b10c:34735 2024-12-09T16:36:13,733 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T16:36:13,733 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T16:36:13,733 DEBUG [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(832): About to register with Master. 
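
The "Installed shutdown hook thread: Shutdownhook:RS:0;..." record a few lines above refers to an ordinary JVM shutdown hook that the region server registers so it can shut down cleanly when the test JVM exits. The mechanism is just Runtime.addShutdownHook; a minimal sketch (the hook body is a placeholder, not HBase's actual ShutdownHook logic):

    // Register a named shutdown hook, analogous to "Shutdownhook:RS:0;dfce6ce5b10c:34735".
    Thread hook = new Thread(() -> {
      // Placeholder for orderly shutdown work (close WALs, connections, ...).
      System.out.println("shutdown hook running");
    }, "Shutdownhook:RS-example");
    Runtime.getRuntime().addShutdownHook(hook);
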
2024-12-09T16:36:13,734 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(2659): reportForDuty to master=dfce6ce5b10c,39251,1733762173238 with port=34735, startcode=1733762173388 2024-12-09T16:36:13,734 DEBUG [RS:0;dfce6ce5b10c:34735 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T16:36:13,737 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50397, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T16:36:13,737 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39251 {}] master.ServerManager(363): Checking decommissioned status of RegionServer dfce6ce5b10c,34735,1733762173388 2024-12-09T16:36:13,737 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39251 {}] master.ServerManager(517): Registering regionserver=dfce6ce5b10c,34735,1733762173388 2024-12-09T16:36:13,739 DEBUG [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00 2024-12-09T16:36:13,739 DEBUG [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39293 2024-12-09T16:36:13,739 DEBUG [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T16:36:13,745 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T16:36:13,745 DEBUG [RS:0;dfce6ce5b10c:34735 {}] zookeeper.ZKUtil(111): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/dfce6ce5b10c,34735,1733762173388 2024-12-09T16:36:13,745 WARN [RS:0;dfce6ce5b10c:34735 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T16:36:13,746 INFO [RS:0;dfce6ce5b10c:34735 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T16:36:13,746 DEBUG [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/WALs/dfce6ce5b10c,34735,1733762173388 2024-12-09T16:36:13,746 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [dfce6ce5b10c,34735,1733762173388] 2024-12-09T16:36:13,749 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T16:36:13,751 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T16:36:13,754 INFO [RS:0;dfce6ce5b10c:34735 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T16:36:13,754 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
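
The MemStoreFlusher record above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) follows from two configuration ratios: the global limit is a fraction of the heap (hbase.regionserver.global.memstore.size, default 0.4) and the low-water mark is a fraction of that limit (hbase.regionserver.global.memstore.size.lower.limit, default 0.95); 880 M x 0.95 = 836 M. A sketch of the arithmetic — note the 2200 M heap figure is inferred from the logged values, not stated anywhere in this log:

    long heapBytes = 2200L * 1024 * 1024;  // assumed test heap: 880 M / 0.4
    double globalFraction = 0.4;           // hbase.regionserver.global.memstore.size
    double lowerLimitFraction = 0.95;      // hbase.regionserver.global.memstore.size.lower.limit

    long globalLimit = (long) (heapBytes * globalFraction);       // ~880 M: updates block above this
    long lowMark = (long) (globalLimit * lowerLimitFraction);     // ~836 M: forced flushes start here
    System.out.printf("limit=%dM lowMark=%dM%n", globalLimit >> 20, lowMark >> 20);
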
2024-12-09T16:36:13,754 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T16:36:13,755 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T16:36:13,755 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=2, maxPoolSize=2 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/dfce6ce5b10c:0, corePoolSize=1, maxPoolSize=1 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:36:13,755 DEBUG [RS:0;dfce6ce5b10c:34735 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/dfce6ce5b10c:0, corePoolSize=3, maxPoolSize=3 2024-12-09T16:36:13,758 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
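
Each "Starting executor service ... corePoolSize=N, maxPoolSize=N" record above is a fixed-size, named thread pool dedicated to one event type (open region, close region, log replay, and so on). A java.util.concurrent sketch of that shape (illustrative; HBase's own executor.ExecutorService wrapper adds event tracking and metrics on top):

    import java.util.concurrent.*;
    import java.util.concurrent.atomic.AtomicInteger;

    static ExecutorService fixedNamedPool(String name, int size) {
      AtomicInteger seq = new AtomicInteger();
      ThreadFactory tf = r -> {
        Thread t = new Thread(r, name + "-" + seq.getAndIncrement());
        t.setDaemon(true);
        return t;
      };
      // core == max, matching corePoolSize=N, maxPoolSize=N in the log.
      return new ThreadPoolExecutor(size, size, 60, TimeUnit.SECONDS,
          new LinkedBlockingQueue<>(), tf);
    }

    // e.g. fixedNamedPool("RS_OPEN_REGION", 1); fixedNamedPool("RS_FLUSH_OPERATIONS", 3);
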
2024-12-09T16:36:13,758 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:13,758 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:13,758 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:13,758 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:13,758 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,34735,1733762173388-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T16:36:13,769 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T16:36:13,769 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,34735,1733762173388-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:13,770 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:13,770 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.Replication(171): dfce6ce5b10c,34735,1733762173388 started 2024-12-09T16:36:13,781 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:13,781 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(1482): Serving as dfce6ce5b10c,34735,1733762173388, RpcServer on dfce6ce5b10c/172.17.0.2:34735, sessionid=0x100bfc947e10001 2024-12-09T16:36:13,781 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T16:36:13,781 DEBUG [RS:0;dfce6ce5b10c:34735 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager dfce6ce5b10c,34735,1733762173388 2024-12-09T16:36:13,781 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,34735,1733762173388' 2024-12-09T16:36:13,781 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T16:36:13,782 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T16:36:13,782 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T16:36:13,782 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T16:36:13,782 DEBUG [RS:0;dfce6ce5b10c:34735 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager dfce6ce5b10c,34735,1733762173388 2024-12-09T16:36:13,782 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'dfce6ce5b10c,34735,1733762173388' 2024-12-09T16:36:13,782 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T16:36:13,782 DEBUG 
[RS:0;dfce6ce5b10c:34735 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T16:36:13,782 DEBUG [RS:0;dfce6ce5b10c:34735 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T16:36:13,782 INFO [RS:0;dfce6ce5b10c:34735 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T16:36:13,782 INFO [RS:0;dfce6ce5b10c:34735 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T16:36:13,884 INFO [RS:0;dfce6ce5b10c:34735 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C34735%2C1733762173388, suffix=, logDir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/WALs/dfce6ce5b10c,34735,1733762173388, archiveDir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/oldWALs, maxLogs=32 2024-12-09T16:36:13,885 INFO [RS:0;dfce6ce5b10c:34735 {}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C34735%2C1733762173388.1733762173885 2024-12-09T16:36:13,890 INFO [RS:0;dfce6ce5b10c:34735 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/WALs/dfce6ce5b10c,34735,1733762173388/dfce6ce5b10c%2C34735%2C1733762173388.1733762173885 2024-12-09T16:36:13,891 DEBUG [RS:0;dfce6ce5b10c:34735 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46211:46211),(127.0.0.1/127.0.0.1:41459:41459)] 2024-12-09T16:36:14,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:36:14,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T16:36:14,071 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T16:36:14,071 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:36:14,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:36:14,071 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T16:36:14,072 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T16:36:14,072 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:36:14,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:36:14,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T16:36:14,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T16:36:14,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:36:14,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:36:14,074 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T16:36:14,075 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T16:36:14,075 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:36:14,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:36:14,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T16:36:14,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/1588230740 2024-12-09T16:36:14,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/1588230740 2024-12-09T16:36:14,077 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T16:36:14,077 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T16:36:14,078 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
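
The FlushLargeStoresPolicy record just above spells out its fallback: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set on hbase:meta, the per-family flush bound becomes the region's memstore flush size divided by the number of families, which comes out to 16.0 M here (later logged as flushSizeLowerBound=16777216). A one-liner of the arithmetic — the 64 MiB input is an inference from the logged result (4 x 16 MiB), not stated directly in the log:

    long regionFlushSize = 64L << 20;  // assumed region memstore flush size (inferred)
    int families = 4;                  // info, ns, rep_barrier, table
    long perFamilyBound = regionFlushSize / families;
    System.out.println(perFamilyBound); // 16777216 bytes = 16.0 M, matching the log
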
2024-12-09T16:36:14,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T16:36:14,081 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T16:36:14,081 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=791822, jitterRate=0.006854042410850525}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T16:36:14,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733762174069Initializing all the Stores at 1733762174069Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762174069Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762174070 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762174070Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762174070Cleaning up temporary data from old regions at 1733762174077 (+7 ms)Region opened successfully at 1733762174082 (+5 ms) 2024-12-09T16:36:14,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T16:36:14,082 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T16:36:14,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T16:36:14,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T16:36:14,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T16:36:14,082 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T16:36:14,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733762174082Disabling compacts and flushes for region at 1733762174082Disabling writes for close at 1733762174082Writing region close 
event to WAL at 1733762174082Closed at 1733762174082 2024-12-09T16:36:14,083 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:36:14,083 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T16:36:14,083 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T16:36:14,084 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T16:36:14,085 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T16:36:14,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/MasterData/WALs/dfce6ce5b10c,39507,1733761912458/dfce6ce5b10c%2C39507%2C1733761912458.1733761913658 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T16:36:14,235 DEBUG [dfce6ce5b10c:39251 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T16:36:14,236 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=dfce6ce5b10c,34735,1733762173388 2024-12-09T16:36:14,237 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,34735,1733762173388, state=OPENING 2024-12-09T16:36:14,270 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T16:36:14,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:36:14,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T16:36:14,279 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T16:36:14,279 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:36:14,279 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:36:14,279 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,34735,1733762173388}] 2024-12-09T16:36:14,431 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T16:36:14,433 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51475, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T16:36:14,436 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T16:36:14,437 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T16:36:14,438 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=dfce6ce5b10c%2C34735%2C1733762173388.meta, suffix=.meta, logDir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/WALs/dfce6ce5b10c,34735,1733762173388, archiveDir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/oldWALs, maxLogs=32 2024-12-09T16:36:14,439 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor dfce6ce5b10c%2C34735%2C1733762173388.meta.1733762174439.meta 2024-12-09T16:36:14,444 INFO 
[RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/WALs/dfce6ce5b10c,34735,1733762173388/dfce6ce5b10c%2C34735%2C1733762173388.meta.1733762174439.meta 2024-12-09T16:36:14,449 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41459:41459),(127.0.0.1/127.0.0.1:46211:46211)] 2024-12-09T16:36:14,454 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T16:36:14,454 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T16:36:14,454 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T16:36:14,454 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T16:36:14,454 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T16:36:14,454 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T16:36:14,454 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T16:36:14,454 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T16:36:14,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T16:36:14,457 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T16:36:14,457 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:36:14,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:36:14,457 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T16:36:14,458 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T16:36:14,458 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:36:14,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:36:14,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T16:36:14,459 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T16:36:14,459 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:36:14,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:36:14,460 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T16:36:14,460 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T16:36:14,461 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T16:36:14,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T16:36:14,461 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T16:36:14,462 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/1588230740 2024-12-09T16:36:14,462 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/1588230740 2024-12-09T16:36:14,463 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T16:36:14,463 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T16:36:14,464 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
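
The CompactionConfiguration records above repeat the same selection parameters for every family: minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2. In ratio-based selection, a store file remains a minor-compaction candidate only while its size stays within the ratio times the combined size of the other candidates; a deliberately simplified sketch of that predicate (illustrative of the rule only, not HBase's ExploringCompactionPolicy code):

    import java.util.List;

    // Keep a candidate file only if it is not "too large" relative to the rest:
    // size(f) <= ratio * sum(size of the other candidates). ratio = 1.2 in the log.
    static boolean withinRatio(long fileSize, List<Long> otherSizes, double ratio) {
      long sum = otherSizes.stream().mapToLong(Long::longValue).sum();
      return fileSize <= ratio * sum;
    }
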
2024-12-09T16:36:14,465 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T16:36:14,465 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767647, jitterRate=-0.023887231945991516}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T16:36:14,465 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T16:36:14,466 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733762174454Writing region info on filesystem at 1733762174454Initializing all the Stores at 1733762174455 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762174455Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762174455Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733762174456 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733762174456Cleaning up temporary data from old regions at 1733762174463 (+7 ms)Running coprocessor post-open hooks at 1733762174465 (+2 ms)Region opened successfully at 1733762174466 (+1 ms) 2024-12-09T16:36:14,467 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733762174431 2024-12-09T16:36:14,469 DEBUG [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T16:36:14,469 INFO [RS_OPEN_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T16:36:14,469 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=dfce6ce5b10c,34735,1733762173388 2024-12-09T16:36:14,470 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as dfce6ce5b10c,34735,1733762173388, state=OPEN 2024-12-09T16:36:14,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:36:14,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T16:36:14,504 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,34735,1733762173388 2024-12-09T16:36:14,504 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:36:14,504 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T16:36:14,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T16:36:14,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=dfce6ce5b10c,34735,1733762173388 in 225 msec 2024-12-09T16:36:14,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T16:36:14,508 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 423 msec 2024-12-09T16:36:14,509 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T16:36:14,509 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T16:36:14,510 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T16:36:14,510 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,34735,1733762173388, seqNum=-1] 2024-12-09T16:36:14,510 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T16:36:14,511 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49547, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T16:36:14,515 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 866 msec 2024-12-09T16:36:14,516 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733762174515, completionTime=-1 2024-12-09T16:36:14,516 INFO 
[master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T16:36:14,516 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T16:36:14,517 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T16:36:14,517 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733762234517 2024-12-09T16:36:14,517 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733762294517 2024-12-09T16:36:14,517 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-09T16:36:14,518 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39251,1733762173238-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:14,518 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39251,1733762173238-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:14,518 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39251,1733762173238-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:14,518 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-dfce6ce5b10c:39251, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:14,518 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:14,518 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T16:36:14,519 DEBUG [master/dfce6ce5b10c:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T16:36:14,521 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.084sec 2024-12-09T16:36:14,521 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T16:36:14,521 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T16:36:14,521 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T16:36:14,522 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
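
At this point the master reports "completed initialization 1.084sec", and a few records further on the harness declares "Minicluster is up". For reference, the surrounding test scaffolding follows the usual HBaseTestingUtil pattern (the class name appears later in this log; the exact method names below are per the HBase 3.x test utility and should be treated as an assumption against other versions):

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterExample {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();  // spins up ZK, HDFS, a master and a region server, as in this log
        try {
          // test body would go here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }
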
2024-12-09T16:36:14,522 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding
2024-12-09T16:36:14,522 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39251,1733762173238-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-12-09T16:36:14,522 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39251,1733762173238-MobFileCompactionChore, period=604800, unit=SECONDS is enabled.
2024-12-09T16:36:14,524 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds
2024-12-09T16:36:14,524 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled.
2024-12-09T16:36:14,524 INFO [master/dfce6ce5b10c:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=dfce6ce5b10c,39251,1733762173238-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled.
2024-12-09T16:36:14,597 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,34107,1733761915656/dfce6ce5b10c%2C34107%2C1733761915656.1733761916051
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:36:14,609 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cdda427, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T16:36:14,609 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request dfce6ce5b10c,39251,-1 for getting cluster id
2024-12-09T16:36:14,610 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-09T16:36:14,611 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '00e126d3-d675-4c36-812a-5336350a3ce8'
2024-12-09T16:36:14,611 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-09T16:36:14,611 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "00e126d3-d675-4c36-812a-5336350a3ce8"
2024-12-09T16:36:14,611 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f60f7e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T16:36:14,611 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [dfce6ce5b10c,39251,-1]
2024-12-09T16:36:14,612 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-12-09T16:36:14,612 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:36:14,613 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45210, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-12-09T16:36:14,613 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dd4ffe3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-09T16:36:14,614 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-09T16:36:14,615 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=dfce6ce5b10c,34735,1733762173388, seqNum=-1]
2024-12-09T16:36:14,615 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-09T16:36:14,616 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55234, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-09T16:36:14,617 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=dfce6ce5b10c,39251,1733762173238
2024-12-09T16:36:14,618 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T16:36:14,620 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false
2024-12-09T16:36:14,621 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-12-09T16:36:14,623 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/WALs/test.com,8080,1, archiveDir=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/oldWALs, maxLogs=32
2024-12-09T16:36:14,623 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733762174623
2024-12-09T16:36:14,628 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/WALs/test.com,8080,1/test.com%2C8080%2C1.1733762174623
2024-12-09T16:36:14,629 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46211:46211),(127.0.0.1/127.0.0.1:41459:41459)]
2024-12-09T16:36:14,629 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733762174629
2024-12-09T16:36:14,634 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,634 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,634 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,634 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,634 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,634 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/WALs/test.com,8080,1/test.com%2C8080%2C1.1733762174623 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/WALs/test.com,8080,1/test.com%2C8080%2C1.1733762174629
2024-12-09T16:36:14,635 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46211:46211),(127.0.0.1/127.0.0.1:41459:41459)]
2024-12-09T16:36:14,635 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/WALs/test.com,8080,1/test.com%2C8080%2C1.1733762174623 is not closed yet, will try archiving it next time
2024-12-09T16:36:14,636 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,636 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,636 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741835_1011 (size=93)
2024-12-09T16:36:14,636 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741835_1011 (size=93)
2024-12-09T16:36:14,636 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,637 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/WALs/test.com,8080,1/test.com%2C8080%2C1.1733762174623 to hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/oldWALs/test.com%2C8080%2C1.1733762174623
2024-12-09T16:36:14,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741836_1012 (size=93)
2024-12-09T16:36:14,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741836_1012 (size=93)
2024-12-09T16:36:14,639 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/oldWALs
2024-12-09T16:36:14,639 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733762174629)
2024-12-09T16:36:14,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T16:36:14,640 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T16:36:14,640 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:36:14,640 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:36:14,640 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:36:14,640 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T16:36:14,640 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T16:36:14,640 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1193759560, stopped=false
2024-12-09T16:36:14,640 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=dfce6ce5b10c,39251,1733762173238
2024-12-09T16:36:14,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T16:36:14,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T16:36:14,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:14,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:14,653 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T16:36:14,654 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T16:36:14,654 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:36:14,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:36:14,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:36:14,654 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'dfce6ce5b10c,34735,1733762173388' *****
2024-12-09T16:36:14,654 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested
2024-12-09T16:36:14,654 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.HeapMemoryManager(220): Stopping
2024-12-09T16:36:14,654 INFO [RS:0;dfce6ce5b10c:34735 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-09T16:36:14,654 INFO [RS:0;dfce6ce5b10c:34735 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-09T16:36:14,654 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-09T16:36:14,654 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(959): stopping server dfce6ce5b10c,34735,1733762173388
2024-12-09T16:36:14,654 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T16:36:14,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T16:36:14,654 INFO [RS:0;dfce6ce5b10c:34735 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;dfce6ce5b10c:34735.
2024-12-09T16:36:14,654 DEBUG [RS:0;dfce6ce5b10c:34735 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T16:36:14,654 DEBUG [RS:0;dfce6ce5b10c:34735 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:36:14,655 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-09T16:36:14,655 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-09T16:36:14,655 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-09T16:36:14,655 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-09T16:36:14,655 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-09T16:36:14,655 DEBUG [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-12-09T16:36:14,655 DEBUG [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-09T16:36:14,655 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-09T16:36:14,655 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-09T16:36:14,655 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-09T16:36:14,655 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-09T16:36:14,655 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-09T16:36:14,655 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB
2024-12-09T16:36:14,657 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38821/user/jenkins/test-data/5a2e930b-b7e6-0fd6-146b-7de118604b52/WALs/dfce6ce5b10c,41215,1733761912669/dfce6ce5b10c%2C41215%2C1733761912669.meta.1733761915111.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor118.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T16:36:14,669 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/1588230740/.tmp/ns/33272f9f0fbc49e5b6ec421531e57843 is 43, key is default/ns:d/1733762174511/Put/seqid=0
2024-12-09T16:36:14,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741837_1013 (size=5153)
2024-12-09T16:36:14,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741837_1013 (size=5153)
2024-12-09T16:36:14,674 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/1588230740/.tmp/ns/33272f9f0fbc49e5b6ec421531e57843
2024-12-09T16:36:14,679 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/1588230740/.tmp/ns/33272f9f0fbc49e5b6ec421531e57843 as hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/1588230740/ns/33272f9f0fbc49e5b6ec421531e57843
2024-12-09T16:36:14,684 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/1588230740/ns/33272f9f0fbc49e5b6ec421531e57843, entries=2, sequenceid=6, filesize=5.0 K
2024-12-09T16:36:14,685 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false
2024-12-09T16:36:14,689 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-12-09T16:36:14,689 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-09T16:36:14,689 INFO [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-09T16:36:14,689 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740:
    Waiting for close lock at 1733762174655
    Running coprocessor pre-close hooks at 1733762174655
    Disabling compacts and flushes for region at 1733762174655
    Disabling writes for close at 1733762174655
    Obtaining lock to block concurrent updates at 1733762174655
    Preparing flush snapshotting stores in 1588230740 at 1733762174655
    Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733762174655
    Flushing stores of hbase:meta,,1.1588230740 at 1733762174656 (+1 ms)
    Flushing 1588230740/ns: creating writer at 1733762174656
    Flushing 1588230740/ns: appending metadata at 1733762174669 (+13 ms)
    Flushing 1588230740/ns: closing flushed file at 1733762174669
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33d7da06: reopening flushed file at 1733762174678 (+9 ms)
    Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1733762174685 (+7 ms)
    Writing region close event to WAL at 1733762174685
    Running coprocessor post-close hooks at 1733762174689 (+4 ms)
    Closed at 1733762174689
2024-12-09T16:36:14,689 DEBUG [RS_CLOSE_META-regionserver/dfce6ce5b10c:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T16:36:14,797 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-12-09T16:36:14,797 INFO [regionserver/dfce6ce5b10c:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-12-09T16:36:14,855 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(976): stopping server dfce6ce5b10c,34735,1733762173388; all regions closed.
2024-12-09T16:36:14,856 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,856 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,856 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,856 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,856 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741834_1010 (size=1152)
2024-12-09T16:36:14,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741834_1010 (size=1152)
2024-12-09T16:36:14,860 DEBUG [RS:0;dfce6ce5b10c:34735 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/oldWALs
2024-12-09T16:36:14,860 INFO [RS:0;dfce6ce5b10c:34735 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C34735%2C1733762173388.meta:.meta(num 1733762174439)
2024-12-09T16:36:14,860 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,860 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,860 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,860 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,860 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:14,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741833_1009 (size=93)
2024-12-09T16:36:14,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741833_1009 (size=93)
2024-12-09T16:36:14,864 DEBUG [RS:0;dfce6ce5b10c:34735 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/oldWALs
2024-12-09T16:36:14,864 INFO [RS:0;dfce6ce5b10c:34735 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog dfce6ce5b10c%2C34735%2C1733762173388:(num 1733762173885)
2024-12-09T16:36:14,864 DEBUG [RS:0;dfce6ce5b10c:34735 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T16:36:14,864 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T16:36:14,864 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:36:14,864 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.ChoreService(370): Chore service for: regionserver/dfce6ce5b10c:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-09T16:36:14,864 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:36:14,864 INFO [regionserver/dfce6ce5b10c:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:36:14,864 INFO [RS:0;dfce6ce5b10c:34735 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34735
2024-12-09T16:36:14,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T16:36:14,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/dfce6ce5b10c,34735,1733762173388
2024-12-09T16:36:14,870 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:36:14,878 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [dfce6ce5b10c,34735,1733762173388]
2024-12-09T16:36:14,886 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/dfce6ce5b10c,34735,1733762173388 already deleted, retry=false
2024-12-09T16:36:14,887 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; dfce6ce5b10c,34735,1733762173388 expired; onlineServers=0
2024-12-09T16:36:14,887 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'dfce6ce5b10c,39251,1733762173238' *****
2024-12-09T16:36:14,887 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T16:36:14,887 INFO [M:0;dfce6ce5b10c:39251 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T16:36:14,887 INFO [M:0;dfce6ce5b10c:39251 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T16:36:14,887 DEBUG [M:0;dfce6ce5b10c:39251 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T16:36:14,887 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T16:36:14,887 DEBUG [M:0;dfce6ce5b10c:39251 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-09T16:36:14,887 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733762173656 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.small.0-1733762173656,5,FailOnTimeoutGroup]
2024-12-09T16:36:14,887 DEBUG [master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733762173655 {}] cleaner.HFileCleaner(306): Exit Thread[master/dfce6ce5b10c:0:becomeActiveMaster-HFileCleaner.large.0-1733762173655,5,FailOnTimeoutGroup]
2024-12-09T16:36:14,887 INFO [M:0;dfce6ce5b10c:39251 {}] hbase.ChoreService(370): Chore service for: master/dfce6ce5b10c:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-09T16:36:14,887 INFO [M:0;dfce6ce5b10c:39251 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T16:36:14,887 DEBUG [M:0;dfce6ce5b10c:39251 {}] master.HMaster(1795): Stopping service threads
2024-12-09T16:36:14,887 INFO [M:0;dfce6ce5b10c:39251 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-09T16:36:14,887 INFO [M:0;dfce6ce5b10c:39251 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T16:36:14,887 INFO [M:0;dfce6ce5b10c:39251 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-09T16:36:14,887 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-09T16:36:14,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-09T16:36:14,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T16:36:14,895 DEBUG [M:0;dfce6ce5b10c:39251 {}] zookeeper.ZKUtil(347): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-09T16:36:14,895 WARN [M:0;dfce6ce5b10c:39251 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-09T16:36:14,896 INFO [M:0;dfce6ce5b10c:39251 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/.lastflushedseqids
2024-12-09T16:36:14,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741838_1014 (size=99)
2024-12-09T16:36:14,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741838_1014 (size=99)
2024-12-09T16:36:14,904 INFO [M:0;dfce6ce5b10c:39251 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-09T16:36:14,904 INFO [M:0;dfce6ce5b10c:39251 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-09T16:36:14,904 DEBUG [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-09T16:36:14,904 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:36:14,904 DEBUG [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:36:14,904 DEBUG [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-09T16:36:14,904 DEBUG [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:36:14,904 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-12-09T16:36:14,919 DEBUG [M:0;dfce6ce5b10c:39251 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5a67db2a545f4fdf952539a368f1d1e3 is 82, key is hbase:meta,,1/info:regioninfo/1733762174469/Put/seqid=0
2024-12-09T16:36:14,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741839_1015 (size=5672)
2024-12-09T16:36:14,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741839_1015 (size=5672)
2024-12-09T16:36:14,924 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5a67db2a545f4fdf952539a368f1d1e3
2024-12-09T16:36:14,939 DEBUG [M:0;dfce6ce5b10c:39251 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/527952082c8146829190233aec0ea540 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733762174515/Put/seqid=0
2024-12-09T16:36:14,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741840_1016 (size=5275)
2024-12-09T16:36:14,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741840_1016 (size=5275)
2024-12-09T16:36:14,944 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/527952082c8146829190233aec0ea540
2024-12-09T16:36:14,960 DEBUG [M:0;dfce6ce5b10c:39251 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/62bb52820d134e8cbd87345bb874e8cb is 69, key is dfce6ce5b10c,34735,1733762173388/rs:state/1733762173737/Put/seqid=0
2024-12-09T16:36:14,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741841_1017 (size=5156)
2024-12-09T16:36:14,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741841_1017 (size=5156)
2024-12-09T16:36:14,964 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/62bb52820d134e8cbd87345bb874e8cb
2024-12-09T16:36:14,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:36:14,979 INFO [RS:0;dfce6ce5b10c:34735 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:36:14,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34735-0x100bfc947e10001, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:36:14,979 INFO [RS:0;dfce6ce5b10c:34735 {}] regionserver.HRegionServer(1031): Exiting; stopping=dfce6ce5b10c,34735,1733762173388; zookeeper connection closed.
2024-12-09T16:36:14,979 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5dce27ba {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5dce27ba
2024-12-09T16:36:14,979 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-09T16:36:14,982 DEBUG [M:0;dfce6ce5b10c:39251 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/061fed74d32842c089643c390dd1a67b is 52, key is load_balancer_on/state:d/1733762174619/Put/seqid=0
2024-12-09T16:36:14,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741842_1018 (size=5056)
2024-12-09T16:36:14,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741842_1018 (size=5056)
2024-12-09T16:36:14,987 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/061fed74d32842c089643c390dd1a67b
2024-12-09T16:36:14,993 DEBUG [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5a67db2a545f4fdf952539a368f1d1e3 as hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5a67db2a545f4fdf952539a368f1d1e3
2024-12-09T16:36:14,999 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5a67db2a545f4fdf952539a368f1d1e3, entries=8, sequenceid=29, filesize=5.5 K
2024-12-09T16:36:15,000 DEBUG [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/527952082c8146829190233aec0ea540 as hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/527952082c8146829190233aec0ea540
2024-12-09T16:36:15,005 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/527952082c8146829190233aec0ea540, entries=3, sequenceid=29, filesize=5.2 K
2024-12-09T16:36:15,006 DEBUG [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/62bb52820d134e8cbd87345bb874e8cb as hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/62bb52820d134e8cbd87345bb874e8cb
2024-12-09T16:36:15,011 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/62bb52820d134e8cbd87345bb874e8cb, entries=1, sequenceid=29, filesize=5.0 K
2024-12-09T16:36:15,012 DEBUG [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/061fed74d32842c089643c390dd1a67b as hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/061fed74d32842c089643c390dd1a67b
2024-12-09T16:36:15,016 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39293/user/jenkins/test-data/787a6090-3820-bb6a-faaf-b4491cd2df00/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/061fed74d32842c089643c390dd1a67b, entries=1, sequenceid=29, filesize=4.9 K
2024-12-09T16:36:15,018 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=29, compaction requested=false
2024-12-09T16:36:15,019 INFO [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-09T16:36:15,019 DEBUG [M:0;dfce6ce5b10c:39251 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
    Waiting for close lock at 1733762174904
    Disabling compacts and flushes for region at 1733762174904
    Disabling writes for close at 1733762174904
    Obtaining lock to block concurrent updates at 1733762174904
    Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733762174904
    Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733762174905 (+1 ms)
    Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733762174905
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733762174905
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733762174918 (+13 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733762174918
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733762174927 (+9 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733762174939 (+12 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733762174939
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733762174947 (+8 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733762174960 (+13 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733762174960
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733762174969 (+9 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733762174982 (+13 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733762174982
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@619c7fa6: reopening flushed file at 1733762174992 (+10 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@587bee31: reopening flushed file at 1733762174999 (+7 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@274748ff: reopening flushed file at 1733762175005 (+6 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c0746c2: reopening flushed file at 1733762175011 (+6 ms)
    Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 113ms, sequenceid=29, compaction requested=false at 1733762175018 (+7 ms)
    Writing region close event to WAL at 1733762175019 (+1 ms)
    Closed at 1733762175019
2024-12-09T16:36:15,019 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:15,020 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:15,020 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:15,020 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:15,020 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T16:36:15,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741830_1006 (size=10311)
2024-12-09T16:36:15,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45529 is added to blk_1073741830_1006 (size=10311)
2024-12-09T16:36:15,023 INFO [M:0;dfce6ce5b10c:39251 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T16:36:15,023 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T16:36:15,023 INFO [M:0;dfce6ce5b10c:39251 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39251
2024-12-09T16:36:15,023 INFO [M:0;dfce6ce5b10c:39251 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T16:36:15,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:36:15,128 INFO [M:0;dfce6ce5b10c:39251 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T16:36:15,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39251-0x100bfc947e10000, quorum=127.0.0.1:52051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T16:36:15,130 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@697b95ad{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:36:15,131 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7cf07d34{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:36:15,131 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:36:15,131 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60e21a06{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:36:15,131 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f055e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/hadoop.log.dir/,STOPPED}
2024-12-09T16:36:15,132 WARN [BP-2138638399-172.17.0.2-1733762171499 heartbeating to localhost/127.0.0.1:39293 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:36:15,132 WARN [BP-2138638399-172.17.0.2-1733762171499 heartbeating to localhost/127.0.0.1:39293 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2138638399-172.17.0.2-1733762171499 (Datanode Uuid 3896793c-9373-4cfd-aab4-9470a2d8fcd6) service to localhost/127.0.0.1:39293
2024-12-09T16:36:15,132 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:36:15,132 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:36:15,132 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d/data/data3/current/BP-2138638399-172.17.0.2-1733762171499 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:36:15,133 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d/data/data4/current/BP-2138638399-172.17.0.2-1733762171499 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:36:15,133 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:36:15,134 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@50b9a266{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T16:36:15,134 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@492ada86{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:36:15,135 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:36:15,135 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39601996{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:36:15,135 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@241be264{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/hadoop.log.dir/,STOPPED}
2024-12-09T16:36:15,135 WARN [BP-2138638399-172.17.0.2-1733762171499 heartbeating to localhost/127.0.0.1:39293 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T16:36:15,136 WARN [BP-2138638399-172.17.0.2-1733762171499 heartbeating to localhost/127.0.0.1:39293 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2138638399-172.17.0.2-1733762171499 (Datanode Uuid cbaaf58d-9b20-41b1-8d3d-4c3f68c2f3ba) service to localhost/127.0.0.1:39293
2024-12-09T16:36:15,136 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d/data/data1/current/BP-2138638399-172.17.0.2-1733762171499 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:36:15,136 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/cluster_d712565c-4d33-e39c-1bf8-d6af6591f35d/data/data2/current/BP-2138638399-172.17.0.2-1733762171499 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T16:36:15,136 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T16:36:15,136 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T16:36:15,136 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T16:36:15,141 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75cb2718{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T16:36:15,141 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66393fdc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T16:36:15,141 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T16:36:15,142 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@63033be1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T16:36:15,142 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13195768{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/73290fae-8369-1942-f14c-82156814674d/hadoop.log.dir/,STOPPED}
2024-12-09T16:36:15,146 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T16:36:15,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T16:36:15,168 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 232)

Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:39293 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39293
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39293
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:39293
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:39293 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:39293 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39293
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:39293
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

- Thread LEAK? -, OpenFileDescriptor=536 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=444 (was 444), ProcessCount=11 (was 11), AvailableMemoryMB=6999 (was 7023)