2024-11-18 22:40:07,208 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@69c81773 2024-11-18 22:40:07,222 main DEBUG Took 0.011967 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-18 22:40:07,222 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-18 22:40:07,223 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-18 22:40:07,224 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-18 22:40:07,225 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,235 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-18 22:40:07,253 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,254 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,256 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,256 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,257 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,257 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,258 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,258 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,259 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,259 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,260 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,260 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,261 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,261 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-18 22:40:07,261 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,262 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,262 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,263 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,263 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,263 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,264 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,264 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,264 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,265 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-18 22:40:07,265 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,265 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-18 22:40:07,267 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-18 22:40:07,268 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-18 22:40:07,270 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-18 22:40:07,271 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-18 22:40:07,272 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-18 22:40:07,273 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-18 22:40:07,282 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-18 22:40:07,285 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-18 22:40:07,287 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-18 22:40:07,288 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-18 22:40:07,288 main DEBUG createAppenders(={Console}) 2024-11-18 22:40:07,290 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@69c81773 initialized 2024-11-18 22:40:07,290 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@69c81773 2024-11-18 22:40:07,291 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@69c81773 OK. 2024-11-18 22:40:07,292 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-18 22:40:07,293 main DEBUG OutputStream closed 2024-11-18 22:40:07,293 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-18 22:40:07,294 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-18 22:40:07,294 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@3e11f9e9 OK 2024-11-18 22:40:07,402 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-18 22:40:07,406 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-18 22:40:07,407 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-18 22:40:07,409 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-18 22:40:07,410 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-18 22:40:07,411 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-18 22:40:07,411 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-18 22:40:07,412 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-18 22:40:07,413 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-18 22:40:07,413 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-18 22:40:07,414 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-18 22:40:07,414 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-18 22:40:07,415 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-18 22:40:07,415 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-18 22:40:07,416 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-18 22:40:07,416 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-18 22:40:07,417 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-18 22:40:07,418 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-18 22:40:07,422 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-18 22:40:07,422 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@21fd5faa) with optional ClassLoader: null 2024-11-18 22:40:07,423 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-18 22:40:07,424 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@21fd5faa] started OK. 2024-11-18T22:40:07,772 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795 2024-11-18 22:40:07,776 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-18 22:40:07,777 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
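The DEBUG lines above trace Log4j Core building a PropertiesConfiguration from the log4j2.properties bundled in the hbase-logging test jar: a PatternLayout, an appender named Console writing to SYSTEM_ERR, per-package logger levels, and a root logger at INFO routed to Console. As a rough illustration only, the following Java sketch assembles an equivalent configuration programmatically with Log4j's ConfigurationBuilder; it substitutes the stock Console appender for HBase's internal HBaseTestAppender, and the class name Log4jConfigSketch is invented for the example.

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.AppenderComponentBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public class Log4jConfigSketch {
  public static void main(String[] args) {
    ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();

    // Appender "Console" on stderr with the same pattern seen in the builder output above.
    // NOTE: the real test uses HBaseTestAppender; a plain Console appender stands in here.
    AppenderComponentBuilder console = builder.newAppender("Console", "Console")
        .addAttribute("target", "SYSTEM_ERR");
    console.add(builder.newLayout("PatternLayout")
        .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n"));
    builder.add(console);

    // A few of the per-package levels from the LoggerConfig$Builder entries above.
    builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
    builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));

    // Root at INFO, routed to the Console appender (levelAndRefs="INFO,Console" above).
    builder.add(builder.newRootLogger(Level.INFO).add(builder.newAppenderRef("Console")));

    Configurator.initialize(builder.build());
  }
}
```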
2024-11-18T22:40:07,789 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-18T22:40:07,840 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=1245, ProcessCount=11, AvailableMemoryMB=7625 2024-11-18T22:40:07,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T22:40:07,872 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd, deleteOnExit=true 2024-11-18T22:40:07,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T22:40:07,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/test.cache.data in system properties and HBase conf 2024-11-18T22:40:07,876 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T22:40:07,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/hadoop.log.dir in system properties and HBase conf 2024-11-18T22:40:07,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T22:40:07,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T22:40:07,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T22:40:08,017 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-18T22:40:08,144 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T22:40:08,155 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:40:08,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:40:08,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T22:40:08,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:40:08,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T22:40:08,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T22:40:08,160 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:40:08,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:40:08,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T22:40:08,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/nfs.dump.dir in system properties and HBase conf 2024-11-18T22:40:08,163 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/java.io.tmpdir in system properties and HBase conf 2024-11-18T22:40:08,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:40:08,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T22:40:08,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T22:40:08,788 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:40:09,684 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-18T22:40:09,882 INFO [Time-limited test {}] log.Log(170): Logging initialized @3680ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-18T22:40:10,015 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:40:10,142 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:40:10,269 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:40:10,270 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:40:10,272 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:40:10,306 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:40:10,332 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:40:10,340 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:40:10,790 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6de997b9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/java.io.tmpdir/jetty-localhost-39241-hadoop-hdfs-3_4_1-tests_jar-_-any-472125763076220370/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:40:10,808 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:39241} 2024-11-18T22:40:10,808 INFO [Time-limited test {}] server.Server(415): Started @4608ms 2024-11-18T22:40:10,929 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:40:12,305 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:40:12,323 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:40:12,348 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:40:12,348 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:40:12,349 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:40:12,360 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26c88bf4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:40:12,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@621a7cbc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:40:12,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11e88411{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/java.io.tmpdir/jetty-localhost-45553-hadoop-hdfs-3_4_1-tests_jar-_-any-5313479000223052823/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:40:12,550 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75423500{HTTP/1.1, (http/1.1)}{localhost:45553} 2024-11-18T22:40:12,552 INFO [Time-limited test {}] server.Server(415): Started @6351ms 2024-11-18T22:40:12,657 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:40:13,050 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:40:13,089 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:40:13,110 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:40:13,110 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:40:13,110 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:40:13,113 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43794ae7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:40:13,114 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35f1cf70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:40:13,298 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@590b36b7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/java.io.tmpdir/jetty-localhost-38867-hadoop-hdfs-3_4_1-tests_jar-_-any-7146033723360151561/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:40:13,300 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@237fc06a{HTTP/1.1, (http/1.1)}{localhost:38867} 2024-11-18T22:40:13,301 INFO [Time-limited test {}] server.Server(415): Started @7100ms 2024-11-18T22:40:13,304 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:40:14,554 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd/data/data1/current/BP-749680845-172.17.0.2-1731969608935/current, will proceed with Du for space computation calculation, 2024-11-18T22:40:14,554 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd/data/data2/current/BP-749680845-172.17.0.2-1731969608935/current, will proceed with Du for space computation calculation, 2024-11-18T22:40:14,636 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:40:14,664 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd/data/data3/current/BP-749680845-172.17.0.2-1731969608935/current, will proceed with Du for space computation calculation, 2024-11-18T22:40:14,665 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd/data/data4/current/BP-749680845-172.17.0.2-1731969608935/current, will proceed with Du for space computation calculation, 2024-11-18T22:40:14,716 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T22:40:14,730 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc47a6763d1edee21 with lease ID 0x8ef681cafe4c9974: Processing first storage report for DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa from datanode DatanodeRegistration(127.0.0.1:43407, datanodeUuid=48e5bdc0-8c30-4e82-bdad-78131f228cbc, infoPort=38809, infoSecurePort=0, ipcPort=34897, storageInfo=lv=-57;cid=testClusterID;nsid=1662248103;c=1731969608935) 2024-11-18T22:40:14,732 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc47a6763d1edee21 with lease ID 0x8ef681cafe4c9974: from storage DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa node DatanodeRegistration(127.0.0.1:43407, datanodeUuid=48e5bdc0-8c30-4e82-bdad-78131f228cbc, infoPort=38809, infoSecurePort=0, ipcPort=34897, storageInfo=lv=-57;cid=testClusterID;nsid=1662248103;c=1731969608935), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-18T22:40:14,733 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc47a6763d1edee21 with lease ID 0x8ef681cafe4c9974: Processing first storage report for DS-350fee63-40de-44a7-99f1-98d86e503cf1 from datanode DatanodeRegistration(127.0.0.1:43407, datanodeUuid=48e5bdc0-8c30-4e82-bdad-78131f228cbc, infoPort=38809, infoSecurePort=0, ipcPort=34897, storageInfo=lv=-57;cid=testClusterID;nsid=1662248103;c=1731969608935) 2024-11-18T22:40:14,733 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc47a6763d1edee21 with lease ID 0x8ef681cafe4c9974: from storage DS-350fee63-40de-44a7-99f1-98d86e503cf1 node DatanodeRegistration(127.0.0.1:43407, datanodeUuid=48e5bdc0-8c30-4e82-bdad-78131f228cbc, infoPort=38809, infoSecurePort=0, ipcPort=34897, storageInfo=lv=-57;cid=testClusterID;nsid=1662248103;c=1731969608935), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T22:40:14,736 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2839300deec2c3ec with lease ID 0x8ef681cafe4c9975: Processing first storage report for DS-c58de840-e8cf-4ead-819b-788d98cfed23 from datanode DatanodeRegistration(127.0.0.1:46783, datanodeUuid=ac5ea8cc-5c6e-4d1a-8496-70c70c3ab01a, infoPort=34341, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1662248103;c=1731969608935) 2024-11-18T22:40:14,736 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x2839300deec2c3ec with lease ID 0x8ef681cafe4c9975: from storage DS-c58de840-e8cf-4ead-819b-788d98cfed23 node DatanodeRegistration(127.0.0.1:46783, datanodeUuid=ac5ea8cc-5c6e-4d1a-8496-70c70c3ab01a, infoPort=34341, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1662248103;c=1731969608935), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T22:40:14,738 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2839300deec2c3ec with lease ID 0x8ef681cafe4c9975: Processing first storage report for DS-72728a01-f447-49f6-9521-79fb7ddaed56 from datanode DatanodeRegistration(127.0.0.1:46783, datanodeUuid=ac5ea8cc-5c6e-4d1a-8496-70c70c3ab01a, infoPort=34341, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1662248103;c=1731969608935) 2024-11-18T22:40:14,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2839300deec2c3ec with lease ID 0x8ef681cafe4c9975: from storage DS-72728a01-f447-49f6-9521-79fb7ddaed56 node DatanodeRegistration(127.0.0.1:46783, datanodeUuid=ac5ea8cc-5c6e-4d1a-8496-70c70c3ab01a, infoPort=34341, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1662248103;c=1731969608935), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T22:40:14,780 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795 2024-11-18T22:40:14,954 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd/zookeeper_0, clientPort=61375, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T22:40:14,984 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61375 2024-11-18T22:40:15,017 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:40:15,023 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:40:15,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:40:15,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:40:16,024 INFO [Time-limited test {}] 
util.FSUtils(489): Created version file at hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72 with version=8 2024-11-18T22:40:16,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/hbase-staging 2024-11-18T22:40:16,229 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-18T22:40:16,839 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:40:16,858 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:40:16,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:40:16,885 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:40:16,886 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:40:16,886 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:40:17,143 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T22:40:17,244 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-18T22:40:17,257 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-18T22:40:17,262 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:40:17,308 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 77526 (auto-detected) 2024-11-18T22:40:17,312 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-18T22:40:17,368 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40255 2024-11-18T22:40:17,401 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40255 connecting to ZooKeeper ensemble=127.0.0.1:61375 2024-11-18T22:40:17,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:402550x0, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:40:17,625 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40255-0x101501affd80000 connected 2024-11-18T22:40:18,067 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
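Before the master bootstrap traced here, the StartMiniClusterOption entry earlier in this log shows the test bringing up one master, one regionserver, two datanodes, and one ZooKeeper server. A minimal sketch of that setup, assuming the HBase 3.x test API (HBaseTestingUtil and StartMiniClusterOption) named in the log; the class and the test body are placeholders, not the actual TestLogRolling code.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the option logged above: 1 master, 1 regionserver, 2 datanodes, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);
    try {
      // The test body would run against util.getConnection() / util.getAdmin() here.
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```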
2024-11-18T22:40:18,071 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:40:18,088 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:40:18,094 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72, hbase.cluster.distributed=false 2024-11-18T22:40:18,140 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:40:18,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40255 2024-11-18T22:40:18,154 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40255 2024-11-18T22:40:18,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40255 2024-11-18T22:40:18,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40255 2024-11-18T22:40:18,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40255 2024-11-18T22:40:18,347 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:40:18,350 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:40:18,350 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:40:18,351 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:40:18,351 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:40:18,351 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:40:18,355 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T22:40:18,361 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:40:18,366 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39437 2024-11-18T22:40:18,369 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39437 connecting to 
ZooKeeper ensemble=127.0.0.1:61375 2024-11-18T22:40:18,370 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:40:18,377 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:40:18,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:394370x0, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:40:18,454 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:394370x0, quorum=127.0.0.1:61375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:40:18,460 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T22:40:18,479 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39437-0x101501affd80001 connected 2024-11-18T22:40:18,496 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T22:40:18,512 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T22:40:18,551 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:40:18,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39437 2024-11-18T22:40:18,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39437 2024-11-18T22:40:18,588 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39437 2024-11-18T22:40:18,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39437 2024-11-18T22:40:18,599 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39437 2024-11-18T22:40:18,629 DEBUG [M:0;3b0ca91ed64a:40255 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3b0ca91ed64a:40255 2024-11-18T22:40:18,637 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3b0ca91ed64a,40255,1731969616515 2024-11-18T22:40:18,720 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:40:18,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:40:18,742 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] 
zookeeper.ZKUtil(111): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3b0ca91ed64a,40255,1731969616515 2024-11-18T22:40:18,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:18,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T22:40:18,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:18,837 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T22:40:18,849 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3b0ca91ed64a,40255,1731969616515 from backup master directory 2024-11-18T22:40:18,864 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3b0ca91ed64a,40255,1731969616515 2024-11-18T22:40:18,865 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:40:18,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:40:18,877 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T22:40:18,877 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3b0ca91ed64a,40255,1731969616515 2024-11-18T22:40:18,882 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-18T22:40:18,884 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-18T22:40:19,078 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/hbase.id] with ID: 9d19e242-c4fc-401a-a8a3-59b30e612050 2024-11-18T22:40:19,079 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/.tmp/hbase.id 2024-11-18T22:40:19,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:40:19,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:40:19,160 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/.tmp/hbase.id]:[hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/hbase.id] 2024-11-18T22:40:19,347 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:40:19,365 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T22:40:19,433 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 59ms. 
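The FSUtils entries just above record the cluster ID being written to a temporary path (.tmp/hbase.id) and then moved to its final location (hbase.id). As a hedged illustration of that write-then-rename pattern using only the standard Hadoop FileSystem API: the root path is a stand-in, the ID value is copied from the log, and the sketch writes plain text where HBase uses its own serialization; only the temp-then-rename sequence is the point.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/example-root"); // stand-in for hbase.rootdir
    Path tmpId = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");
    String clusterId = "9d19e242-c4fc-401a-a8a3-59b30e612050"; // ID value taken from the log above

    // Write the ID to a temporary file first, so readers never observe a partial hbase.id.
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // Then move it (an atomic metadata operation on HDFS) to its final location.
    if (!fs.rename(tmpId, finalId)) {
      throw new IOException("Failed to move " + tmpId + " to " + finalId);
    }
  }
}
```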
2024-11-18T22:40:19,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:19,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:19,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:40:19,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:40:19,674 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:40:19,680 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T22:40:19,694 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:40:19,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:40:19,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:40:20,213 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store 2024-11-18T22:40:20,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:40:20,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:40:20,301 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-18T22:40:20,305 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:40:20,306 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:40:20,306 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:40:20,307 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:40:20,309 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:40:20,310 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:40:20,310 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T22:40:20,312 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969620306Disabling compacts and flushes for region at 1731969620306Disabling writes for close at 1731969620309 (+3 ms)Writing region close event to WAL at 1731969620310 (+1 ms)Closed at 1731969620310 2024-11-18T22:40:20,315 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/.initializing 2024-11-18T22:40:20,315 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/WALs/3b0ca91ed64a,40255,1731969616515 2024-11-18T22:40:20,358 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C40255%2C1731969616515, suffix=, logDir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/WALs/3b0ca91ed64a,40255,1731969616515, archiveDir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/oldWALs, maxLogs=10 2024-11-18T22:40:20,371 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C40255%2C1731969616515.1731969620364 2024-11-18T22:40:20,402 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/WALs/3b0ca91ed64a,40255,1731969616515/3b0ca91ed64a%2C40255%2C1731969616515.1731969620364 2024-11-18T22:40:20,417 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34341:34341),(127.0.0.1/127.0.0.1:38809:38809)] 2024-11-18T22:40:20,443 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:40:20,444 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:40:20,449 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,451 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,512 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,550 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T22:40:20,555 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:20,563 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:40:20,564 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,591 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T22:40:20,594 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:20,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:40:20,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,619 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T22:40:20,619 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:20,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:40:20,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T22:40:20,646 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:20,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:40:20,654 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,659 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,661 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,677 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,678 DEBUG 
[master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,684 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T22:40:20,690 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:40:20,706 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:40:20,710 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829164, jitterRate=0.05433663725852966}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T22:40:20,723 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731969620470Initializing all the Stores at 1731969620473 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969620473Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969620476 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969620476Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969620476Cleaning up temporary data from old regions at 1731969620678 (+202 ms)Region opened successfully at 1731969620723 (+45 ms) 2024-11-18T22:40:20,728 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T22:40:20,802 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c5d3483, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:40:20,865 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T22:40:20,886 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T22:40:20,886 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T22:40:20,892 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T22:40:20,897 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 4 msec 2024-11-18T22:40:20,905 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 8 msec 2024-11-18T22:40:20,906 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T22:40:20,959 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T22:40:20,977 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T22:40:21,033 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T22:40:21,036 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T22:40:21,041 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T22:40:21,056 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T22:40:21,064 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T22:40:21,078 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T22:40:21,098 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T22:40:21,101 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-18T22:40:21,107 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T22:40:21,135 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T22:40:21,148 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T22:40:21,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:40:21,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:21,169 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:40:21,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:21,177 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3b0ca91ed64a,40255,1731969616515, sessionid=0x101501affd80000, setting cluster-up flag (Was=false) 2024-11-18T22:40:21,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:21,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:21,236 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T22:40:21,239 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,40255,1731969616515 2024-11-18T22:40:21,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:21,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:21,340 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T22:40:21,361 DEBUG 
[master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,40255,1731969616515 2024-11-18T22:40:21,413 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T22:40:21,452 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(746): ClusterId : 9d19e242-c4fc-401a-a8a3-59b30e612050 2024-11-18T22:40:21,455 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T22:40:21,495 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T22:40:21,496 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T22:40:21,529 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T22:40:21,530 DEBUG [RS:0;3b0ca91ed64a:39437 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cebab5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:40:21,559 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3b0ca91ed64a:39437 2024-11-18T22:40:21,566 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T22:40:21,566 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T22:40:21,566 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-18T22:40:21,571 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T22:40:21,572 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(2659): reportForDuty to master=3b0ca91ed64a,40255,1731969616515 with port=39437, startcode=1731969618285 2024-11-18T22:40:21,588 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T22:40:21,596 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
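
Throughout this section ZKWatcher logs events such as type=NodeChildrenChanged, state=SyncConnected, path=/hbase as znodes under the base znode are created and changed. These are ordinary one-shot ZooKeeper watches firing. A minimal sketch of the same mechanism with the plain ZooKeeper client API, assuming only the quorum address from the log (127.0.0.1:61375); the class name, session timeout, and printout are illustrative, not HBase's own ZKWatcher code.

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61375", 30000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // Events arrive with exactly the fields ZKWatcher logs:
        // type (NodeCreated, NodeChildrenChanged, ...), state (SyncConnected), path.
        System.out.println("type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
      }
    });
    // Register a one-shot child watch on /hbase; a later create of a child such as
    // /hbase/running fires a NodeChildrenChanged on /hbase, as seen in the log.
    List<String> children = zk.getChildren("/hbase", true);
    System.out.println("children=" + children);
  }
}
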
2024-11-18T22:40:21,602 DEBUG [RS:0;3b0ca91ed64a:39437 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T22:40:21,623 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3b0ca91ed64a,40255,1731969616515 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T22:40:21,660 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:40:21,661 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:40:21,661 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:40:21,661 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:40:21,661 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3b0ca91ed64a:0, corePoolSize=10, maxPoolSize=10 2024-11-18T22:40:21,662 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:21,662 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:40:21,662 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:21,687 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731969651686 2024-11-18T22:40:21,689 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T22:40:21,690 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T22:40:21,695 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:40:21,695 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T22:40:21,701 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T22:40:21,701 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T22:40:21,702 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T22:40:21,703 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T22:40:21,708 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:21,718 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:21,719 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T22:40:21,721 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T22:40:21,728 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T22:40:21,730 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T22:40:21,752 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T22:40:21,752 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T22:40:21,755 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969621754,5,FailOnTimeoutGroup] 
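
The cleaner records above show the master wiring up its log and HFile cleaner chores with a chain of delegate classes (TimeToLive* cleaners, ReplicationLogCleaner, HFileLinkCleaner, SnapshotHFileCleaner). A hedged configuration sketch: these delegate lists are normally supplied as comma-separated class names through Configuration keys; the keys used below (hbase.master.logcleaner.plugins, hbase.master.hfilecleaner.plugins, hbase.master.cleaner.interval) are the usual ones but should be verified against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerPluginsSketch {

  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Comma-separated delegates; the classes named here are the ones the log
    // shows being initialized by CleanerChore for the LogsCleaner chore.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
            + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner,"
            + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner,"
            + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner");
    // Delegates for the HFileCleaner chore, matching the log lines above.
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner,"
            + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
            + "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner,"
            + "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
    // Both chores run with "period=600000, unit=MILLISECONDS" in the log.
    conf.setInt("hbase.master.cleaner.interval", 600000);
    return conf;
  }

  public static void main(String[] args) {
    System.out.println(build().get("hbase.master.logcleaner.plugins"));
  }
}
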
2024-11-18T22:40:21,767 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969621757,5,FailOnTimeoutGroup] 2024-11-18T22:40:21,768 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:21,768 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T22:40:21,769 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:21,770 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:21,791 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56305, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T22:40:21,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:40:21,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:40:21,797 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T22:40:21,798 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72 2024-11-18T22:40:21,799 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40255 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3b0ca91ed64a,39437,1731969618285 2024-11-18T22:40:21,804 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40255 {}] master.ServerManager(517): Registering regionserver=3b0ca91ed64a,39437,1731969618285 2024-11-18T22:40:21,833 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72 2024-11-18T22:40:21,833 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45791 2024-11-18T22:40:21,833 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T22:40:21,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:40:21,864 DEBUG [RS:0;3b0ca91ed64a:39437 {}] zookeeper.ZKUtil(111): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3b0ca91ed64a,39437,1731969618285 2024-11-18T22:40:21,865 WARN [RS:0;3b0ca91ed64a:39437 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T22:40:21,865 INFO [RS:0;3b0ca91ed64a:39437 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:40:21,865 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285 2024-11-18T22:40:21,877 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3b0ca91ed64a,39437,1731969618285] 2024-11-18T22:40:21,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:40:21,893 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:40:21,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:40:21,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:40:21,918 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered 
compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:40:21,919 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:21,921 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:40:21,921 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:40:21,931 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:40:21,931 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:21,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:40:21,933 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:40:21,936 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T22:40:21,937 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:40:21,937 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:21,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:40:21,939 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:40:21,950 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:40:21,950 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:21,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:40:21,953 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:40:21,956 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740 2024-11-18T22:40:21,961 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740 2024-11-18T22:40:21,967 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:40:21,968 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:40:21,970 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T22:40:21,970 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
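
The two FlushLargeStoresPolicy records in this section (32.0 M for master:store, 16.0 M for hbase:meta) come from the fallback that divides the region's memstore flush size by its number of column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set. A minimal sketch of that arithmetic: the 128 MB figure matches the logged flushSize=134217728, while the 64 MB figure for hbase:meta is an assumption that is merely consistent with the logged 16.0 M.

public class FlushLowerBoundSketch {

  // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset:
  // the region's memstore flush size divided by its number of column families.
  static long fallbackLowerBound(long memstoreFlushSizeBytes, int numFamilies) {
    return memstoreFlushSizeBytes / numFamilies;
  }

  public static void main(String[] args) {
    long mb = 1024L * 1024L;
    // master:store: flushSize=134217728 (128 MB) in the log, 4 families -> 32 MB ("32.0 M").
    System.out.println(fallbackLowerBound(128 * mb, 4) / mb + " MB");
    // hbase:meta: the logged "16.0 M" is consistent with a 64 MB flush size and 4 families.
    System.out.println(fallbackLowerBound(64 * mb, 4) / mb + " MB");
  }
}
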
2024-11-18T22:40:21,975 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:40:21,981 INFO [RS:0;3b0ca91ed64a:39437 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T22:40:21,981 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:21,986 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T22:40:21,996 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:40:22,000 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876439, jitterRate=0.11445097625255585}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:40:22,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731969621893Initializing all the Stores at 1731969621901 (+8 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969621901Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969621904 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969621904Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969621904Cleaning up temporary data from old regions at 1731969621968 (+64 ms)Region opened successfully at 1731969622005 (+37 ms) 2024-11-18T22:40:22,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:40:22,006 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:40:22,006 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:40:22,007 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:40:22,007 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:40:22,008 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T22:40:22,010 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:22,011 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:22,011 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:22,011 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:22,012 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:22,012 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:40:22,012 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:22,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969622006Disabling compacts and flushes for region at 1731969622006Disabling writes for close at 1731969622007 (+1 ms)Writing region close event to WAL at 1731969622011 (+4 ms)Closed at 1731969622011 2024-11-18T22:40:22,012 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:40:22,012 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:22,016 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:22,017 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:22,017 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:22,020 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:22,021 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:40:22,021 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3b0ca91ed64a:0, 
corePoolSize=3, maxPoolSize=3 2024-11-18T22:40:22,021 DEBUG [RS:0;3b0ca91ed64a:39437 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:40:22,022 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:40:22,022 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T22:40:22,027 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:22,028 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:22,028 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:22,028 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:22,029 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:22,032 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,39437,1731969618285-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:40:22,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T22:40:22,059 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:40:22,064 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T22:40:22,066 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T22:40:22,066 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,39437,1731969618285-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:22,067 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:22,068 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.Replication(171): 3b0ca91ed64a,39437,1731969618285 started 2024-11-18T22:40:22,123 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
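
Many of the region-server records above register periodic work as ScheduledChore instances on a ChoreService (CompactionChecker every 1000 ms, MemstoreFlusherChore, ExecutorStatusChore, and so on). A small sketch of that pattern using the hbase-common classes; the chore name, period, sleep, and Stoppable implementation here are invented for illustration.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // ScheduledChore needs a Stoppable so it can stop itself when its owner shuts down.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    ChoreService choreService = new ChoreService("example");
    // Runs every 1000 ms, like the CompactionChecker chore in the log.
    ScheduledChore chore = new ScheduledChore("exampleChore", stopper, 1000) {
      @Override
      protected void chore() {
        System.out.println("chore tick");
      }
    };
    choreService.scheduleChore(chore);

    Thread.sleep(3000);
    stopper.stop("done");
    choreService.shutdown();
  }
}
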
2024-11-18T22:40:22,124 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1482): Serving as 3b0ca91ed64a,39437,1731969618285, RpcServer on 3b0ca91ed64a/172.17.0.2:39437, sessionid=0x101501affd80001 2024-11-18T22:40:22,125 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T22:40:22,125 DEBUG [RS:0;3b0ca91ed64a:39437 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3b0ca91ed64a,39437,1731969618285 2024-11-18T22:40:22,125 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,39437,1731969618285' 2024-11-18T22:40:22,125 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T22:40:22,127 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T22:40:22,127 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T22:40:22,127 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T22:40:22,128 DEBUG [RS:0;3b0ca91ed64a:39437 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3b0ca91ed64a,39437,1731969618285 2024-11-18T22:40:22,128 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,39437,1731969618285' 2024-11-18T22:40:22,128 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T22:40:22,129 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T22:40:22,130 DEBUG [RS:0;3b0ca91ed64a:39437 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T22:40:22,130 INFO [RS:0;3b0ca91ed64a:39437 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T22:40:22,130 INFO [RS:0;3b0ca91ed64a:39437 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T22:40:22,217 WARN [3b0ca91ed64a:40255 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-18T22:40:22,239 INFO [RS:0;3b0ca91ed64a:39437 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C39437%2C1731969618285, suffix=, logDir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285, archiveDir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/oldWALs, maxLogs=32 2024-11-18T22:40:22,243 INFO [RS:0;3b0ca91ed64a:39437 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C39437%2C1731969618285.1731969622243 2024-11-18T22:40:22,273 INFO [RS:0;3b0ca91ed64a:39437 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969622243 2024-11-18T22:40:22,283 DEBUG [RS:0;3b0ca91ed64a:39437 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38809:38809),(127.0.0.1/127.0.0.1:34341:34341)] 2024-11-18T22:40:22,471 DEBUG [3b0ca91ed64a:40255 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T22:40:22,488 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3b0ca91ed64a,39437,1731969618285 2024-11-18T22:40:22,497 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,39437,1731969618285, state=OPENING 2024-11-18T22:40:22,678 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T22:40:22,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:22,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:40:22,690 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:40:22,692 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:40:22,693 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:40:22,695 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,39437,1731969618285}] 2024-11-18T22:40:22,904 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T22:40:22,916 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54637, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T22:40:22,960 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T22:40:22,961 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:40:22,979 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C39437%2C1731969618285.meta, suffix=.meta, logDir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285, archiveDir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/oldWALs, maxLogs=32 2024-11-18T22:40:22,982 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C39437%2C1731969618285.meta.1731969622981.meta 2024-11-18T22:40:23,021 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.meta.1731969622981.meta 2024-11-18T22:40:23,040 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34341:34341),(127.0.0.1/127.0.0.1:38809:38809)] 2024-11-18T22:40:23,046 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:40:23,049 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T22:40:23,053 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T22:40:23,073 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T22:40:23,092 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T22:40:23,094 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:40:23,094 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T22:40:23,094 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T22:40:23,124 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:40:23,133 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:40:23,134 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:23,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:40:23,141 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:40:23,157 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:40:23,157 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:23,161 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:40:23,162 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:40:23,165 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:40:23,165 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:23,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:40:23,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:40:23,170 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:40:23,171 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:23,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T22:40:23,172 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:40:23,174 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740 2024-11-18T22:40:23,185 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740 2024-11-18T22:40:23,197 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:40:23,198 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:40:23,202 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T22:40:23,210 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:40:23,212 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=740262, jitterRate=-0.058708831667900085}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:40:23,213 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T22:40:23,217 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731969623095Writing region info on filesystem at 1731969623096 (+1 ms)Initializing all the Stores at 1731969623115 (+19 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969623116 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969623120 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969623120Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969623120Cleaning up temporary data from old regions at 1731969623198 (+78 ms)Running coprocessor post-open hooks at 1731969623213 (+15 ms)Region opened successfully at 1731969623217 (+4 ms) 2024-11-18T22:40:23,233 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731969622883 2024-11-18T22:40:23,264 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T22:40:23,267 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T22:40:23,268 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3b0ca91ed64a,39437,1731969618285 2024-11-18T22:40:23,271 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,39437,1731969618285, state=OPEN 2024-11-18T22:40:23,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:40:23,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:40:23,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:40:23,367 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:40:23,368 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,39437,1731969618285 2024-11-18T22:40:23,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T22:40:23,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,39437,1731969618285 in 673 msec 2024-11-18T22:40:23,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T22:40:23,400 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.3570 sec 2024-11-18T22:40:23,402 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:40:23,403 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T22:40:23,444 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:40:23,446 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,39437,1731969618285, seqNum=-1] 2024-11-18T22:40:23,513 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:40:23,518 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53987, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:40:23,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 2.1280 sec 2024-11-18T22:40:23,636 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731969623636, completionTime=-1 2024-11-18T22:40:23,639 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T22:40:23,640 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T22:40:23,792 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T22:40:23,792 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731969683792 2024-11-18T22:40:23,793 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731969743792 2024-11-18T22:40:23,793 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 152 msec 2024-11-18T22:40:23,798 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,40255,1731969616515-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:23,798 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,40255,1731969616515-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:23,798 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,40255,1731969616515-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:23,800 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3b0ca91ed64a:40255, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T22:40:23,813 DEBUG [master/3b0ca91ed64a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T22:40:23,814 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:23,820 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T22:40:23,887 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 5.008sec 2024-11-18T22:40:23,889 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T22:40:23,891 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T22:40:23,894 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T22:40:23,895 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T22:40:23,896 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T22:40:23,897 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,40255,1731969616515-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:40:23,898 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,40255,1731969616515-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T22:40:23,922 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T22:40:23,923 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T22:40:23,924 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,40255,1731969616515-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T22:40:23,986 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fd5948b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:40:23,997 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-18T22:40:23,997 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-18T22:40:24,003 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3b0ca91ed64a,40255,-1 for getting cluster id 2024-11-18T22:40:24,007 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T22:40:24,027 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9d19e242-c4fc-401a-a8a3-59b30e612050' 2024-11-18T22:40:24,034 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T22:40:24,035 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9d19e242-c4fc-401a-a8a3-59b30e612050" 2024-11-18T22:40:24,035 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@670aabaa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:40:24,035 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3b0ca91ed64a,40255,-1] 2024-11-18T22:40:24,039 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T22:40:24,042 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:40:24,048 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55874, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T22:40:24,057 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ae1442f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:40:24,058 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:40:24,084 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,39437,1731969618285, seqNum=-1] 2024-11-18T22:40:24,085 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:40:24,097 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42642, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:40:24,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=3b0ca91ed64a,40255,1731969616515 2024-11-18T22:40:24,140 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:40:24,150 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T22:40:24,156 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T22:40:24,163 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 3b0ca91ed64a,40255,1731969616515 2024-11-18T22:40:24,166 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@52d59cfd 2024-11-18T22:40:24,174 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T22:40:24,190 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55880, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T22:40:24,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40255 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T22:40:24,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40255 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
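The two TableDescriptorChecker warnings above are expected for this test: the table is created with a deliberately tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that flushes and log rolls trigger quickly. A minimal sketch of building such a descriptor with the public HBase client API follows; the table name, family name, and the two sizes are taken from the log, but the code itself is an illustrative assumption, not the test's actual source.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class CreateSmallFlushTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Sizes mirror the warnings in the log: 768 KB max file size, 8 KB memstore flush size.
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
          .setMaxFileSize(786432L)          // MAX_FILESIZE from the log
          .setMemStoreFlushSize(8192L)      // MEMSTORE_FLUSHSIZE from the log
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .build();
      admin.createTable(td);                // corresponds to the CreateTableProcedure (pid=4) below
    }
  }
}
```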
2024-11-18T22:40:24,202 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40255 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:40:24,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40255 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-18T22:40:24,238 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T22:40:24,242 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:24,244 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40255 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-18T22:40:24,252 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T22:40:24,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40255 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T22:40:24,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741835_1011 (size=389) 2024-11-18T22:40:24,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741835_1011 (size=389) 2024-11-18T22:40:24,378 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a1f4d493e02bb0413aaa499040f4167c, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72 2024-11-18T22:40:24,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741836_1012 (size=72) 2024-11-18T22:40:24,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741836_1012 (size=72) 2024-11-18T22:40:24,406 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:40:24,407 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing a1f4d493e02bb0413aaa499040f4167c, disabling compactions & flushes 2024-11-18T22:40:24,407 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 2024-11-18T22:40:24,407 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 2024-11-18T22:40:24,407 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. after waiting 0 ms 2024-11-18T22:40:24,407 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 2024-11-18T22:40:24,407 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 2024-11-18T22:40:24,408 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for a1f4d493e02bb0413aaa499040f4167c: Waiting for close lock at 1731969624406Disabling compacts and flushes for region at 1731969624406Disabling writes for close at 1731969624407 (+1 ms)Writing region close event to WAL at 1731969624407Closed at 1731969624407 2024-11-18T22:40:24,414 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T22:40:24,420 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731969624414"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731969624414"}]},"ts":"1731969624414"} 2024-11-18T22:40:24,438 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T22:40:24,449 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T22:40:24,454 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731969624449"}]},"ts":"1731969624449"} 2024-11-18T22:40:24,460 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-18T22:40:24,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a1f4d493e02bb0413aaa499040f4167c, ASSIGN}] 2024-11-18T22:40:24,469 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a1f4d493e02bb0413aaa499040f4167c, ASSIGN 2024-11-18T22:40:24,476 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a1f4d493e02bb0413aaa499040f4167c, ASSIGN; state=OFFLINE, location=3b0ca91ed64a,39437,1731969618285; forceNewPlan=false, retain=false 2024-11-18T22:40:24,632 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a1f4d493e02bb0413aaa499040f4167c, regionState=OPENING, regionLocation=3b0ca91ed64a,39437,1731969618285 2024-11-18T22:40:24,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a1f4d493e02bb0413aaa499040f4167c, ASSIGN because future has completed 2024-11-18T22:40:24,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a1f4d493e02bb0413aaa499040f4167c, server=3b0ca91ed64a,39437,1731969618285}] 2024-11-18T22:40:24,865 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 
2024-11-18T22:40:24,866 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a1f4d493e02bb0413aaa499040f4167c, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:40:24,866 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:24,867 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:40:24,867 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:24,867 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:24,887 INFO [StoreOpener-a1f4d493e02bb0413aaa499040f4167c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:24,892 INFO [StoreOpener-a1f4d493e02bb0413aaa499040f4167c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1f4d493e02bb0413aaa499040f4167c columnFamilyName info 2024-11-18T22:40:24,892 DEBUG [StoreOpener-a1f4d493e02bb0413aaa499040f4167c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:40:24,894 INFO [StoreOpener-a1f4d493e02bb0413aaa499040f4167c-1 {}] regionserver.HStore(327): Store=a1f4d493e02bb0413aaa499040f4167c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:40:24,895 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:24,898 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:24,899 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:24,900 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:24,901 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:24,909 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:24,928 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:40:24,932 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a1f4d493e02bb0413aaa499040f4167c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766772, jitterRate=-0.024999141693115234}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T22:40:24,933 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:24,941 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a1f4d493e02bb0413aaa499040f4167c: Running coprocessor pre-open hook at 1731969624867Writing region info on filesystem at 1731969624867Initializing all the Stores at 1731969624880 (+13 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969624880Cleaning up temporary data from old regions at 1731969624901 (+21 ms)Running coprocessor post-open hooks at 1731969624933 (+32 ms)Region opened successfully at 1731969624941 (+8 ms) 2024-11-18T22:40:24,960 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c., pid=6, masterSystemTime=1731969624835 2024-11-18T22:40:24,986 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a1f4d493e02bb0413aaa499040f4167c, regionState=OPEN, openSeqNum=2, regionLocation=3b0ca91ed64a,39437,1731969618285 
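For reference, the desiredMaxFileSize values logged when these regions opened are consistent with the configured hbase.hregion.max.filesize of 786432 bytes (per the earlier MAX_FILESIZE warning) after the split policy's random jitter is applied: 786432 × (1 − 0.058708831…) ≈ 740262 for hbase:meta, and 786432 × (1 − 0.024999141…) ≈ 766772 for the test region. Likewise, initialSize=16384 is twice the 8192-byte memstore flush size, which matches the sizeToCheck=16.0 K that the split policy reports later in this log.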
2024-11-18T22:40:24,988 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 2024-11-18T22:40:24,989 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 2024-11-18T22:40:24,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a1f4d493e02bb0413aaa499040f4167c, server=3b0ca91ed64a,39437,1731969618285 because future has completed 2024-11-18T22:40:25,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T22:40:25,042 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a1f4d493e02bb0413aaa499040f4167c, server=3b0ca91ed64a,39437,1731969618285 in 354 msec 2024-11-18T22:40:25,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T22:40:25,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=a1f4d493e02bb0413aaa499040f4167c, ASSIGN in 579 msec 2024-11-18T22:40:25,073 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T22:40:25,074 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731969625073"}]},"ts":"1731969625073"} 2024-11-18T22:40:25,083 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-18T22:40:25,086 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T22:40:25,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 880 msec 2024-11-18T22:40:27,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T22:40:27,236 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T22:40:27,247 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-18T22:40:27,248 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T22:40:27,252 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:40:27,252 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T22:40:27,253 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T22:40:27,253 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-18T22:40:27,926 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T22:40:27,933 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-18T22:40:28,629 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-18T22:40:34,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40255 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T22:40:34,283 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-18T22:40:34,287 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-18T22:40:34,298 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-18T22:40:34,298 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 
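The meta scan above is how the test utility resolves the single region of the new table. A client can get the same answer through the RegionLocator API; a brief sketch follows, assuming an existing Connection such as the one in the earlier example (illustrative only).

```java
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class ListTestTableRegions {
  // Sketch: list region locations for the test table (the log reports exactly one region).
  static void printRegions(Connection conn) throws IOException {
    try (RegionLocator locator =
        conn.getRegionLocator(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
      }
    }
  }
}
```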
2024-11-18T22:40:34,300 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C39437%2C1731969618285.1731969634299 2024-11-18T22:40:34,344 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:40:34,345 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:40:34,346 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:40:34,346 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:40:34,346 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:40:34,347 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969622243 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969634299 2024-11-18T22:40:34,357 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38809:38809),(127.0.0.1/127.0.0.1:34341:34341)] 2024-11-18T22:40:34,357 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969622243 is not closed yet, will try archiving it next time 2024-11-18T22:40:34,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741833_1009 (size=451) 2024-11-18T22:40:34,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741833_1009 (size=451) 2024-11-18T22:40:34,380 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c., hostname=3b0ca91ed64a,39437,1731969618285, seqNum=2] 2024-11-18T22:40:34,777 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969622243 to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/oldWALs/3b0ca91ed64a%2C39437%2C1731969618285.1731969622243 2024-11-18T22:40:44,785 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
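The entries above show the 22:40:34 roll completing: once the old WAL's single entry is no longer needed, WAL-Archive-0 moves it to oldWALs. A roll like this can also be requested explicitly against a region server through the Admin API; a minimal sketch, reusing the server name from the log, is shown below (illustrative only, not necessarily how this test triggers its rolls).

```java
import java.io.IOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

public final class RollWalExample {
  // Ask the region server seen in this log to roll its WAL writer; the previous
  // WAL is archived to oldWALs once none of its edits remain unflushed.
  static void rollWal(Admin admin) throws IOException {
    ServerName rs = ServerName.valueOf("3b0ca91ed64a,39437,1731969618285");
    admin.rollWALWriter(rs);
  }
}
```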
2024-11-18T22:40:46,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39437 {}] regionserver.HRegion(8855): Flush requested on a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:40:46,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a1f4d493e02bb0413aaa499040f4167c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:40:46,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/c01474880dd64936a8da3652d982b103 is 1080, key is row0001/info:/1731969634384/Put/seqid=0 2024-11-18T22:40:46,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741838_1014 (size=12509) 2024-11-18T22:40:46,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741838_1014 (size=12509) 2024-11-18T22:40:46,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/c01474880dd64936a8da3652d982b103 2024-11-18T22:40:46,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/c01474880dd64936a8da3652d982b103 as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/c01474880dd64936a8da3652d982b103 2024-11-18T22:40:46,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/c01474880dd64936a8da3652d982b103, entries=7, sequenceid=11, filesize=12.2 K 2024-11-18T22:40:46,678 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a1f4d493e02bb0413aaa499040f4167c in 229ms, sequenceid=11, compaction requested=false 2024-11-18T22:40:46,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a1f4d493e02bb0413aaa499040f4167c: 2024-11-18T22:40:54,485 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C39437%2C1731969618285.1731969654485 2024-11-18T22:40:54,762 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 257 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK], DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK]] 2024-11-18T22:40:54,768 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:40:54,772 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:40:54,774 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:40:54,775 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:40:54,775 INFO [sync.4 {}] 
wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:40:54,775 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969634299 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969654485 2024-11-18T22:40:54,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741837_1013 (size=12399) 2024-11-18T22:40:54,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741837_1013 (size=12399) 2024-11-18T22:40:54,812 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34341:34341),(127.0.0.1/127.0.0.1:38809:38809)] 2024-11-18T22:40:54,813 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969634299 is not closed yet, will try archiving it next time 2024-11-18T22:40:55,033 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:40:57,248 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 209 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:40:59,468 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 216 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:01,681 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:01,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39437 {}] regionserver.HRegion(8855): Flush requested on a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:41:01,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a1f4d493e02bb0413aaa499040f4167c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:41:01,888 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 204 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:01,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/8cc52aab2429497ebf39b5ef2117b89f is 1080, key is row0008/info:/1731969648441/Put/seqid=0 2024-11-18T22:41:01,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741840_1016 (size=12509) 2024-11-18T22:41:01,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741840_1016 (size=12509) 2024-11-18T22:41:02,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/8cc52aab2429497ebf39b5ef2117b89f 2024-11-18T22:41:02,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/8cc52aab2429497ebf39b5ef2117b89f as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/8cc52aab2429497ebf39b5ef2117b89f 2024-11-18T22:41:02,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/8cc52aab2429497ebf39b5ef2117b89f, entries=7, sequenceid=21, filesize=12.2 K 2024-11-18T22:41:02,753 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:02,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a1f4d493e02bb0413aaa499040f4167c in 1071ms, sequenceid=21, compaction requested=false 2024-11-18T22:41:02,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a1f4d493e02bb0413aaa499040f4167c: 2024-11-18T22:41:02,754 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-18T22:41:02,754 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:41:02,755 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/c01474880dd64936a8da3652d982b103 because midkey is the same as first or last row 2024-11-18T22:41:03,900 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 213 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:04,214 INFO [master/3b0ca91ed64a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T22:41:04,214 INFO [master/3b0ca91ed64a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-18T22:41:06,112 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 207 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:06,115 WARN [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:06,117 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C39437%2C1731969618285:(num 1731969654485) roll requested 2024-11-18T22:41:06,118 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C39437%2C1731969618285.1731969666117 2024-11-18T22:41:06,389 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 260 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:06,389 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:06,392 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:06,400 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:06,401 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:06,408 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:06,412 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969654485 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969666117 2024-11-18T22:41:06,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741839_1015 (size=7739) 
2024-11-18T22:41:06,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741839_1015 (size=7739) 2024-11-18T22:41:06,445 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34341:34341),(127.0.0.1/127.0.0.1:38809:38809)] 2024-11-18T22:41:06,445 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969654485 is not closed yet, will try archiving it next time 2024-11-18T22:41:06,446 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969634299 to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/oldWALs/3b0ca91ed64a%2C39437%2C1731969618285.1731969634299 2024-11-18T22:41:08,324 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:09,867 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a1f4d493e02bb0413aaa499040f4167c, had cached 0 bytes from a total of 25018 2024-11-18T22:41:10,533 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 204 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:12,740 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:14,786 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-18T22:41:14,946 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:16,948 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T22:41:16,949 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C39437%2C1731969618285.1731969676949 2024-11-18T22:41:21,981 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5028 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:21,983 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5028 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:21,984 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:21,984 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C39437%2C1731969618285:(num 1731969676949) roll requested 2024-11-18T22:41:21,992 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:21,993 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:21,993 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:21,994 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:21,994 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969666117 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969676949 2024-11-18T22:41:21,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741841_1017 (size=4753) 2024-11-18T22:41:21,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741841_1017 (size=4753) 2024-11-18T22:41:22,002 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38809:38809),(127.0.0.1/127.0.0.1:34341:34341)] 2024-11-18T22:41:22,003 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969666117 is not closed yet, will try archiving it next time 2024-11-18T22:41:22,004 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C39437%2C1731969618285.1731969682004 2024-11-18T22:41:27,014 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] 
wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK], DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK]] 2024-11-18T22:41:27,014 WARN [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK], DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK]] 2024-11-18T22:41:27,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39437 {}] regionserver.HRegion(8855): Flush requested on a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:41:27,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a1f4d493e02bb0413aaa499040f4167c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:41:27,022 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5009 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK], DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK]] 2024-11-18T22:41:27,022 WARN [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5009 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK], DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK]] 2024-11-18T22:41:29,015 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T22:41:32,016 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK], DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK]] 2024-11-18T22:41:32,017 WARN [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK], DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK]] 2024-11-18T22:41:32,017 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:32,017 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:32,017 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:32,017 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:32,017 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:32,018 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969676949 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969682004 2024-11-18T22:41:32,019 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34341:34341),(127.0.0.1/127.0.0.1:38809:38809)] 2024-11-18T22:41:32,019 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969676949 is not closed yet, will try archiving it next time 2024-11-18T22:41:32,019 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C39437%2C1731969618285:(num 1731969682004) roll requested 2024-11-18T22:41:32,019 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C39437%2C1731969618285.1731969692019 2024-11-18T22:41:32,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741842_1018 (size=1569) 2024-11-18T22:41:32,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741842_1018 (size=1569) 2024-11-18T22:41:32,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/ebcf0da7dc8c46dfb003ad5f0ce5b183 is 1080, key is row0015/info:/1731969663686/Put/seqid=0 2024-11-18T22:41:32,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741844_1020 (size=12509) 2024-11-18T22:41:32,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741844_1020 (size=12509) 2024-11-18T22:41:32,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/ebcf0da7dc8c46dfb003ad5f0ce5b183 2024-11-18T22:41:32,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/ebcf0da7dc8c46dfb003ad5f0ce5b183 as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/ebcf0da7dc8c46dfb003ad5f0ce5b183 2024-11-18T22:41:32,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/ebcf0da7dc8c46dfb003ad5f0ce5b183, entries=7, sequenceid=31, filesize=12.2 K 2024-11-18T22:41:37,046 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5024 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:37,046 WARN [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5024 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:37,058 INFO [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:37,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a1f4d493e02bb0413aaa499040f4167c in 10043ms, sequenceid=31, compaction requested=true 2024-11-18T22:41:37,058 WARN [FSHLog-0-hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72-prefix:3b0ca91ed64a,39437,1731969618285 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46783,DS-c58de840-e8cf-4ead-819b-788d98cfed23,DISK], DatanodeInfoWithStorage[127.0.0.1:43407,DS-a5d1097b-5db6-4d6a-94fa-74702e3075aa,DISK]] 2024-11-18T22:41:37,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a1f4d493e02bb0413aaa499040f4167c: 2024-11-18T22:41:37,058 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,058 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,058 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-18T22:41:37,058 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:41:37,058 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/c01474880dd64936a8da3652d982b103 because midkey is the same as first or last row 2024-11-18T22:41:37,058 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,060 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,061 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a1f4d493e02bb0413aaa499040f4167c:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:41:37,062 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,062 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969682004 with entries=1, filesize=430 B; new WAL 
/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969692019 2024-11-18T22:41:37,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:41:37,063 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:41:37,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741843_1019 (size=438) 2024-11-18T22:41:37,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741843_1019 (size=438) 2024-11-18T22:41:37,070 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969654485 to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/oldWALs/3b0ca91ed64a%2C39437%2C1731969618285.1731969654485 2024-11-18T22:41:37,071 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:41:37,073 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34341:34341),(127.0.0.1/127.0.0.1:38809:38809)] 2024-11-18T22:41:37,073 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969666117 to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/oldWALs/3b0ca91ed64a%2C39437%2C1731969618285.1731969666117 2024-11-18T22:41:37,073 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C39437%2C1731969618285:(num 1731969697073) roll requested 2024-11-18T22:41:37,073 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C39437%2C1731969618285.1731969697073 2024-11-18T22:41:37,074 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.HStore(1541): a1f4d493e02bb0413aaa499040f4167c/info is initiating minor compaction (all files) 2024-11-18T22:41:37,074 INFO [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a1f4d493e02bb0413aaa499040f4167c/info in TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 
2024-11-18T22:41:37,075 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969676949 to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/oldWALs/3b0ca91ed64a%2C39437%2C1731969618285.1731969676949 2024-11-18T22:41:37,075 INFO [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/c01474880dd64936a8da3652d982b103, hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/8cc52aab2429497ebf39b5ef2117b89f, hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/ebcf0da7dc8c46dfb003ad5f0ce5b183] into tmpdir=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp, totalSize=36.6 K 2024-11-18T22:41:37,078 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969682004 to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/oldWALs/3b0ca91ed64a%2C39437%2C1731969618285.1731969682004 2024-11-18T22:41:37,080 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] compactions.Compactor(225): Compacting c01474880dd64936a8da3652d982b103, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731969634384 2024-11-18T22:41:37,081 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8cc52aab2429497ebf39b5ef2117b89f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731969648441 2024-11-18T22:41:37,085 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] compactions.Compactor(225): Compacting ebcf0da7dc8c46dfb003ad5f0ce5b183, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731969663686 2024-11-18T22:41:37,097 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,098 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,098 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,100 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,100 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,101 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969692019 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969697073 2024-11-18T22:41:37,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741845_1021 (size=93) 
2024-11-18T22:41:37,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741845_1021 (size=93) 2024-11-18T22:41:37,109 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34341:34341),(127.0.0.1/127.0.0.1:38809:38809)] 2024-11-18T22:41:37,109 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969692019 is not closed yet, will try archiving it next time 2024-11-18T22:41:37,111 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C39437%2C1731969618285.1731969697110 2024-11-18T22:41:37,147 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,148 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,148 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,148 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,148 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:37,148 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969697073 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969697110 2024-11-18T22:41:37,153 INFO [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1f4d493e02bb0413aaa499040f4167c#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:41:37,155 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/40bd79680c974d64aba52230b627694b is 1080, key is row0001/info:/1731969634384/Put/seqid=0 2024-11-18T22:41:37,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741846_1022 (size=1258) 2024-11-18T22:41:37,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741846_1022 (size=1258) 2024-11-18T22:41:37,164 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969692019 is not closed yet, will try archiving it next time 2024-11-18T22:41:37,180 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34341:34341),(127.0.0.1/127.0.0.1:38809:38809)] 2024-11-18T22:41:37,181 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969692019 is not closed yet, will try archiving it next time 2024-11-18T22:41:37,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741848_1024 (size=27710) 2024-11-18T22:41:37,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741848_1024 (size=27710) 2024-11-18T22:41:37,257 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/40bd79680c974d64aba52230b627694b as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/40bd79680c974d64aba52230b627694b 2024-11-18T22:41:37,296 INFO [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a1f4d493e02bb0413aaa499040f4167c/info of a1f4d493e02bb0413aaa499040f4167c into 40bd79680c974d64aba52230b627694b(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T22:41:37,296 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a1f4d493e02bb0413aaa499040f4167c: 2024-11-18T22:41:37,299 INFO [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c., storeName=a1f4d493e02bb0413aaa499040f4167c/info, priority=13, startTime=1731969697060; duration=0sec 2024-11-18T22:41:37,300 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T22:41:37,300 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:41:37,300 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/40bd79680c974d64aba52230b627694b because midkey is the same as first or last row 2024-11-18T22:41:37,301 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T22:41:37,301 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:41:37,301 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/40bd79680c974d64aba52230b627694b because midkey is the same as first or last row 2024-11-18T22:41:37,301 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-18T22:41:37,301 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:41:37,302 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/40bd79680c974d64aba52230b627694b because midkey is the same as first or last row 2024-11-18T22:41:37,302 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:41:37,302 DEBUG [RS:0;3b0ca91ed64a:39437-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1f4d493e02bb0413aaa499040f4167c:info 2024-11-18T22:41:37,510 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/WALs/3b0ca91ed64a,39437,1731969618285/3b0ca91ed64a%2C39437%2C1731969618285.1731969692019 to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/oldWALs/3b0ca91ed64a%2C39437%2C1731969618285.1731969692019 
2024-11-18T22:41:44,786 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T22:41:49,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39437 {}] regionserver.HRegion(8855): Flush requested on a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:41:49,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a1f4d493e02bb0413aaa499040f4167c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:41:49,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/a655b9e1b07a4d0287ff061d7c51fd13 is 1080, key is row0022/info:/1731969697111/Put/seqid=0 2024-11-18T22:41:49,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741849_1025 (size=12509) 2024-11-18T22:41:49,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741849_1025 (size=12509) 2024-11-18T22:41:49,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/a655b9e1b07a4d0287ff061d7c51fd13 2024-11-18T22:41:49,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/a655b9e1b07a4d0287ff061d7c51fd13 as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/a655b9e1b07a4d0287ff061d7c51fd13 2024-11-18T22:41:49,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/a655b9e1b07a4d0287ff061d7c51fd13, entries=7, sequenceid=42, filesize=12.2 K 2024-11-18T22:41:49,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for a1f4d493e02bb0413aaa499040f4167c in 581ms, sequenceid=42, compaction requested=false 2024-11-18T22:41:49,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a1f4d493e02bb0413aaa499040f4167c: 2024-11-18T22:41:49,753 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-18T22:41:49,753 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:41:49,753 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/40bd79680c974d64aba52230b627694b because midkey is the same as first or last row 2024-11-18T22:41:54,867 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a1f4d493e02bb0413aaa499040f4167c, had cached 0 bytes from a total of 40219 2024-11-18T22:41:57,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T22:41:57,189 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T22:41:57,189 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:41:57,195 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:41:57,199 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:41:57,199 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T22:41:57,199 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T22:41:57,199 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=461911212, stopped=false 2024-11-18T22:41:57,206 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3b0ca91ed64a,40255,1731969616515 2024-11-18T22:41:57,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:41:57,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:41:57,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:41:57,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:41:57,221 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:41:57,221 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T22:41:57,222 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:41:57,222 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:41:57,222 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '3b0ca91ed64a,39437,1731969618285' ***** 2024-11-18T22:41:57,222 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T22:41:57,223 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T22:41:57,223 INFO [RS:0;3b0ca91ed64a:39437 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T22:41:57,223 INFO [RS:0;3b0ca91ed64a:39437 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T22:41:57,223 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T22:41:57,223 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:41:57,223 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(3091): Received CLOSE for a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:41:57,227 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:41:57,236 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(959): stopping server 3b0ca91ed64a,39437,1731969618285 2024-11-18T22:41:57,236 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:41:57,236 INFO [RS:0;3b0ca91ed64a:39437 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3b0ca91ed64a:39437. 2024-11-18T22:41:57,237 DEBUG [RS:0;3b0ca91ed64a:39437 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:41:57,237 DEBUG [RS:0;3b0ca91ed64a:39437 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:41:57,237 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T22:41:57,237 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-18T22:41:57,237 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T22:41:57,237 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T22:41:57,238 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a1f4d493e02bb0413aaa499040f4167c, disabling compactions & flushes 2024-11-18T22:41:57,239 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 2024-11-18T22:41:57,239 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 2024-11-18T22:41:57,239 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T22:41:57,239 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. after waiting 0 ms 2024-11-18T22:41:57,239 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 2024-11-18T22:41:57,239 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, a1f4d493e02bb0413aaa499040f4167c=TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.} 2024-11-18T22:41:57,239 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a1f4d493e02bb0413aaa499040f4167c 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-18T22:41:57,239 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:41:57,240 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:41:57,240 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:41:57,240 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:41:57,240 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:41:57,240 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:41:57,241 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-18T22:41:57,270 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/9a613335ad4f4c3d842074b1a2a91bef is 1080, key is row0029/info:/1731969711174/Put/seqid=0 2024-11-18T22:41:57,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741850_1026 (size=8193) 2024-11-18T22:41:57,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741850_1026 (size=8193) 2024-11-18T22:41:57,345 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/.tmp/info/6066bb7990c3481bab4efdce6111be86 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c./info:regioninfo/1731969624986/Put/seqid=0 2024-11-18T22:41:57,348 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/9a613335ad4f4c3d842074b1a2a91bef 2024-11-18T22:41:57,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741851_1027 (size=7016) 2024-11-18T22:41:57,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741851_1027 (size=7016) 2024-11-18T22:41:57,415 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/.tmp/info/9a613335ad4f4c3d842074b1a2a91bef as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/9a613335ad4f4c3d842074b1a2a91bef 2024-11-18T22:41:57,440 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a1f4d493e02bb0413aaa499040f4167c 2024-11-18T22:41:57,478 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/9a613335ad4f4c3d842074b1a2a91bef, entries=3, sequenceid=48, filesize=8.0 K 2024-11-18T22:41:57,482 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for a1f4d493e02bb0413aaa499040f4167c in 242ms, sequenceid=48, compaction requested=true 2024-11-18T22:41:57,488 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/c01474880dd64936a8da3652d982b103, hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/8cc52aab2429497ebf39b5ef2117b89f, hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/ebcf0da7dc8c46dfb003ad5f0ce5b183] to archive 2024-11-18T22:41:57,496 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-18T22:41:57,524 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/c01474880dd64936a8da3652d982b103 to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/c01474880dd64936a8da3652d982b103 2024-11-18T22:41:57,539 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/8cc52aab2429497ebf39b5ef2117b89f to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/8cc52aab2429497ebf39b5ef2117b89f 2024-11-18T22:41:57,564 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/ebcf0da7dc8c46dfb003ad5f0ce5b183 to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/archive/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/info/ebcf0da7dc8c46dfb003ad5f0ce5b183 2024-11-18T22:41:57,585 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3b0ca91ed64a:40255 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] 
at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-18T22:41:57,591 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c01474880dd64936a8da3652d982b103=12509, 8cc52aab2429497ebf39b5ef2117b89f=12509, ebcf0da7dc8c46dfb003ad5f0ce5b183=12509] 2024-11-18T22:41:57,627 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/default/TestLogRolling-testSlowSyncLogRolling/a1f4d493e02bb0413aaa499040f4167c/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-18T22:41:57,631 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 2024-11-18T22:41:57,631 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a1f4d493e02bb0413aaa499040f4167c: Waiting for close lock at 1731969717238Running coprocessor pre-close hooks at 1731969717238Disabling compacts and flushes for region at 1731969717238Disabling writes for close at 1731969717239 (+1 ms)Obtaining lock to block concurrent updates at 1731969717239Preparing flush snapshotting stores in a1f4d493e02bb0413aaa499040f4167c at 1731969717239Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731969717240 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. at 1731969717242 (+2 ms)Flushing a1f4d493e02bb0413aaa499040f4167c/info: creating writer at 1731969717242Flushing a1f4d493e02bb0413aaa499040f4167c/info: appending metadata at 1731969717269 (+27 ms)Flushing a1f4d493e02bb0413aaa499040f4167c/info: closing flushed file at 1731969717269Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1480c979: reopening flushed file at 1731969717404 (+135 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for a1f4d493e02bb0413aaa499040f4167c in 242ms, sequenceid=48, compaction requested=true at 1731969717482 (+78 ms)Writing region close event to WAL at 1731969717601 (+119 ms)Running coprocessor post-close hooks at 1731969717628 (+27 ms)Closed at 1731969717631 (+3 ms) 2024-11-18T22:41:57,632 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731969624192.a1f4d493e02bb0413aaa499040f4167c. 
2024-11-18T22:41:57,640 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:41:57,811 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/.tmp/info/6066bb7990c3481bab4efdce6111be86 2024-11-18T22:41:57,841 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:41:57,851 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/.tmp/ns/95c3c2b25bb24ed4b8931a9dfa9e3b06 is 43, key is default/ns:d/1731969623528/Put/seqid=0 2024-11-18T22:41:57,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741852_1028 (size=5153) 2024-11-18T22:41:57,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741852_1028 (size=5153) 2024-11-18T22:41:57,909 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/.tmp/ns/95c3c2b25bb24ed4b8931a9dfa9e3b06 2024-11-18T22:41:57,967 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/.tmp/table/b98e2178c3d84536a6307a88dd12a17f is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731969625073/Put/seqid=0 2024-11-18T22:41:58,032 INFO [regionserver/3b0ca91ed64a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T22:41:58,032 INFO [regionserver/3b0ca91ed64a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T22:41:58,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741853_1029 (size=5396) 2024-11-18T22:41:58,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741853_1029 (size=5396) 2024-11-18T22:41:58,042 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:41:58,044 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/.tmp/table/b98e2178c3d84536a6307a88dd12a17f 2024-11-18T22:41:58,064 INFO [regionserver/3b0ca91ed64a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:41:58,090 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/.tmp/info/6066bb7990c3481bab4efdce6111be86 as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/info/6066bb7990c3481bab4efdce6111be86 2024-11-18T22:41:58,130 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/info/6066bb7990c3481bab4efdce6111be86, entries=10, sequenceid=11, filesize=6.9 K 2024-11-18T22:41:58,136 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/.tmp/ns/95c3c2b25bb24ed4b8931a9dfa9e3b06 as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/ns/95c3c2b25bb24ed4b8931a9dfa9e3b06 2024-11-18T22:41:58,164 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/ns/95c3c2b25bb24ed4b8931a9dfa9e3b06, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T22:41:58,168 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/.tmp/table/b98e2178c3d84536a6307a88dd12a17f as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/table/b98e2178c3d84536a6307a88dd12a17f 2024-11-18T22:41:58,207 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/table/b98e2178c3d84536a6307a88dd12a17f, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T22:41:58,221 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 980ms, sequenceid=11, compaction requested=false 2024-11-18T22:41:58,242 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-18T22:41:58,242 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-18T22:41:58,242 DEBUG [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:41:58,282 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T22:41:58,285 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:41:58,285 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:41:58,286 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969717240Running coprocessor pre-close hooks at 1731969717240Disabling compacts and flushes for region at 1731969717240Disabling writes for close at 1731969717240Obtaining lock to block concurrent updates at 1731969717241 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731969717241Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731969717241Flushing stores of hbase:meta,,1.1588230740 at 1731969717253 (+12 ms)Flushing 1588230740/info: creating writer at 1731969717253Flushing 1588230740/info: appending metadata at 1731969717338 (+85 ms)Flushing 1588230740/info: closing flushed file at 1731969717338Flushing 1588230740/ns: creating writer at 1731969717824 (+486 ms)Flushing 1588230740/ns: appending metadata at 1731969717850 (+26 ms)Flushing 1588230740/ns: closing flushed file at 1731969717850Flushing 1588230740/table: creating writer at 1731969717933 (+83 ms)Flushing 1588230740/table: appending metadata at 1731969717967 (+34 ms)Flushing 1588230740/table: closing flushed file at 1731969717967Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5522a62c: reopening flushed file at 1731969718082 (+115 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1fc4cca9: reopening flushed file at 1731969718130 (+48 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31ae867e: reopening flushed file at 1731969718165 (+35 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 980ms, sequenceid=11, compaction requested=false at 1731969718221 (+56 ms)Writing region close event to WAL at 1731969718261 (+40 ms)Running coprocessor post-close hooks at 1731969718285 (+24 ms)Closed at 1731969718285 2024-11-18T22:41:58,286 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T22:41:58,443 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(976): stopping server 3b0ca91ed64a,39437,1731969618285; all regions closed. 
2024-11-18T22:41:58,445 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:58,445 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:58,445 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:58,445 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:58,445 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:58,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741834_1010 (size=3066) 2024-11-18T22:41:58,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741834_1010 (size=3066) 2024-11-18T22:41:58,469 DEBUG [RS:0;3b0ca91ed64a:39437 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/oldWALs 2024-11-18T22:41:58,469 INFO [RS:0;3b0ca91ed64a:39437 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C39437%2C1731969618285.meta:.meta(num 1731969622981) 2024-11-18T22:41:58,470 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:58,472 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:58,472 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:58,472 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:58,472 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:58,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741847_1023 (size=12695) 2024-11-18T22:41:58,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741847_1023 (size=12695) 2024-11-18T22:41:58,484 DEBUG [RS:0;3b0ca91ed64a:39437 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/oldWALs 2024-11-18T22:41:58,484 INFO [RS:0;3b0ca91ed64a:39437 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C39437%2C1731969618285:(num 1731969697110) 2024-11-18T22:41:58,484 DEBUG [RS:0;3b0ca91ed64a:39437 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:41:58,484 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:41:58,485 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:41:58,487 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.ChoreService(370): Chore service for: regionserver/3b0ca91ed64a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T22:41:58,488 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:41:58,488 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T22:41:58,489 INFO [RS:0;3b0ca91ed64a:39437 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39437 2024-11-18T22:41:58,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3b0ca91ed64a,39437,1731969618285 2024-11-18T22:41:58,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:41:58,569 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:41:58,572 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3b0ca91ed64a,39437,1731969618285] 2024-11-18T22:41:58,636 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3b0ca91ed64a,39437,1731969618285 already deleted, retry=false 2024-11-18T22:41:58,637 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3b0ca91ed64a,39437,1731969618285 expired; onlineServers=0 2024-11-18T22:41:58,637 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3b0ca91ed64a,40255,1731969616515' ***** 2024-11-18T22:41:58,637 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T22:41:58,640 INFO [M:0;3b0ca91ed64a:40255 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:41:58,640 INFO [M:0;3b0ca91ed64a:40255 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:41:58,640 DEBUG [M:0;3b0ca91ed64a:40255 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T22:41:58,641 DEBUG [M:0;3b0ca91ed64a:40255 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T22:41:58,641 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-18T22:41:58,641 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969621757 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969621757,5,FailOnTimeoutGroup] 2024-11-18T22:41:58,641 INFO [M:0;3b0ca91ed64a:40255 {}] hbase.ChoreService(370): Chore service for: master/3b0ca91ed64a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T22:41:58,641 INFO [M:0;3b0ca91ed64a:40255 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:41:58,641 DEBUG [M:0;3b0ca91ed64a:40255 {}] master.HMaster(1795): Stopping service threads 2024-11-18T22:41:58,641 INFO [M:0;3b0ca91ed64a:40255 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T22:41:58,641 INFO [M:0;3b0ca91ed64a:40255 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:41:58,642 INFO [M:0;3b0ca91ed64a:40255 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T22:41:58,642 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-18T22:41:58,642 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969621754 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969621754,5,FailOnTimeoutGroup] 2024-11-18T22:41:58,708 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-18T22:41:58,709 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-18T22:41:58,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T22:41:58,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:41:58,722 INFO [M:0;3b0ca91ed64a:40255 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/.lastflushedseqids 2024-11-18T22:41:58,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:41:58,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39437-0x101501affd80001, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:41:58,734 INFO [RS:0;3b0ca91ed64a:39437 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:41:58,735 INFO [RS:0;3b0ca91ed64a:39437 {}] regionserver.HRegionServer(1031): Exiting; stopping=3b0ca91ed64a,39437,1731969618285; zookeeper connection closed. 2024-11-18T22:41:58,739 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7a5bc00d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7a5bc00d 2024-11-18T22:41:58,744 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T22:41:58,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741854_1030 (size=130) 2024-11-18T22:41:58,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741854_1030 (size=130) 2024-11-18T22:41:58,791 INFO [M:0;3b0ca91ed64a:40255 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T22:41:58,791 INFO [M:0;3b0ca91ed64a:40255 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T22:41:58,792 DEBUG [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:41:58,792 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T22:41:58,792 DEBUG [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:41:58,792 DEBUG [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:41:58,792 DEBUG [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:41:58,792 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-18T22:41:58,836 DEBUG [M:0;3b0ca91ed64a:40255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1f6227a56daa484f94a8d68a160dcb74 is 82, key is hbase:meta,,1/info:regioninfo/1731969623268/Put/seqid=0 2024-11-18T22:41:58,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741855_1031 (size=5672) 2024-11-18T22:41:58,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741855_1031 (size=5672) 2024-11-18T22:41:58,884 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1f6227a56daa484f94a8d68a160dcb74 2024-11-18T22:41:58,984 DEBUG [M:0;3b0ca91ed64a:40255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cefc178cd77542ada01eac57d1f7f5cf is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731969625090/Put/seqid=0 2024-11-18T22:41:59,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741856_1032 (size=6248) 2024-11-18T22:41:59,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741856_1032 (size=6248) 2024-11-18T22:41:59,043 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cefc178cd77542ada01eac57d1f7f5cf 2024-11-18T22:41:59,077 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cefc178cd77542ada01eac57d1f7f5cf 2024-11-18T22:41:59,109 DEBUG [M:0;3b0ca91ed64a:40255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c7fdbae69ad64333bf40ce5478aa6b10 is 69, key is 3b0ca91ed64a,39437,1731969618285/rs:state/1731969621808/Put/seqid=0 2024-11-18T22:41:59,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46783 is added to blk_1073741857_1033 (size=5156) 2024-11-18T22:41:59,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741857_1033 (size=5156) 2024-11-18T22:41:59,158 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c7fdbae69ad64333bf40ce5478aa6b10 2024-11-18T22:41:59,226 DEBUG [M:0;3b0ca91ed64a:40255 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aeb72008565f416fa9f41b49f6cfeee1 is 52, key is load_balancer_on/state:d/1731969624146/Put/seqid=0 2024-11-18T22:41:59,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741858_1034 (size=5056) 2024-11-18T22:41:59,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741858_1034 (size=5056) 2024-11-18T22:41:59,697 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aeb72008565f416fa9f41b49f6cfeee1 2024-11-18T22:41:59,710 DEBUG [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1f6227a56daa484f94a8d68a160dcb74 as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1f6227a56daa484f94a8d68a160dcb74 2024-11-18T22:41:59,729 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1f6227a56daa484f94a8d68a160dcb74, entries=8, sequenceid=59, filesize=5.5 K 2024-11-18T22:41:59,733 DEBUG [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cefc178cd77542ada01eac57d1f7f5cf as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cefc178cd77542ada01eac57d1f7f5cf 2024-11-18T22:41:59,759 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for cefc178cd77542ada01eac57d1f7f5cf 2024-11-18T22:41:59,759 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cefc178cd77542ada01eac57d1f7f5cf, entries=6, sequenceid=59, filesize=6.1 K 2024-11-18T22:41:59,764 DEBUG [M:0;3b0ca91ed64a:40255 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c7fdbae69ad64333bf40ce5478aa6b10 as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c7fdbae69ad64333bf40ce5478aa6b10 2024-11-18T22:41:59,778 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c7fdbae69ad64333bf40ce5478aa6b10, entries=1, sequenceid=59, filesize=5.0 K 2024-11-18T22:41:59,780 DEBUG [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aeb72008565f416fa9f41b49f6cfeee1 as hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aeb72008565f416fa9f41b49f6cfeee1 2024-11-18T22:41:59,793 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aeb72008565f416fa9f41b49f6cfeee1, entries=1, sequenceid=59, filesize=4.9 K 2024-11-18T22:41:59,804 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1012ms, sequenceid=59, compaction requested=false 2024-11-18T22:41:59,843 INFO [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:41:59,843 DEBUG [M:0;3b0ca91ed64a:40255 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969718791Disabling compacts and flushes for region at 1731969718791Disabling writes for close at 1731969718792 (+1 ms)Obtaining lock to block concurrent updates at 1731969718792Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731969718792Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1731969718793 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731969718801 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731969718801Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731969718835 (+34 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731969718835Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731969718917 (+82 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731969718983 (+66 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731969718983Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731969719078 (+95 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731969719107 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731969719108 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731969719184 (+76 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731969719225 (+41 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731969719225Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4376d645: reopening flushed file at 1731969719708 (+483 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63317b45: reopening flushed file at 1731969719729 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2be9a314: reopening flushed file at 1731969719759 (+30 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39507562: reopening flushed file at 1731969719778 (+19 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1012ms, sequenceid=59, compaction requested=false at 1731969719804 (+26 ms)Writing region close event to WAL at 1731969719843 (+39 ms)Closed at 1731969719843 2024-11-18T22:41:59,856 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:59,862 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:59,868 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:59,876 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:59,880 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:41:59,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43407 is added to blk_1073741830_1006 (size=27985) 2024-11-18T22:41:59,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46783 is added to blk_1073741830_1006 (size=27985) 2024-11-18T22:41:59,929 INFO [M:0;3b0ca91ed64a:40255 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T22:41:59,929 INFO [M:0;3b0ca91ed64a:40255 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40255 2024-11-18T22:41:59,930 INFO [M:0;3b0ca91ed64a:40255 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:41:59,930 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T22:42:00,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:42:00,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40255-0x101501affd80000, quorum=127.0.0.1:61375, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:42:00,061 INFO [M:0;3b0ca91ed64a:40255 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:42:00,080 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@590b36b7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:00,085 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@237fc06a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:42:00,085 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:42:00,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35f1cf70{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:42:00,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43794ae7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/hadoop.log.dir/,STOPPED} 2024-11-18T22:42:00,110 WARN [BP-749680845-172.17.0.2-1731969608935 heartbeating to localhost/127.0.0.1:45791 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:42:00,110 WARN [BP-749680845-172.17.0.2-1731969608935 heartbeating to localhost/127.0.0.1:45791 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-749680845-172.17.0.2-1731969608935 (Datanode Uuid ac5ea8cc-5c6e-4d1a-8496-70c70c3ab01a) service to localhost/127.0.0.1:45791 2024-11-18T22:42:00,111 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd/data/data3/current/BP-749680845-172.17.0.2-1731969608935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:00,112 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd/data/data4/current/BP-749680845-172.17.0.2-1731969608935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:00,116 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:42:00,116 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:42:00,118 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:42:00,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@11e88411{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:00,153 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75423500{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:42:00,153 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:42:00,153 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@621a7cbc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:42:00,154 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26c88bf4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/hadoop.log.dir/,STOPPED} 2024-11-18T22:42:00,173 WARN [BP-749680845-172.17.0.2-1731969608935 heartbeating to localhost/127.0.0.1:45791 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:42:00,173 WARN [BP-749680845-172.17.0.2-1731969608935 heartbeating to localhost/127.0.0.1:45791 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-749680845-172.17.0.2-1731969608935 (Datanode Uuid 48e5bdc0-8c30-4e82-bdad-78131f228cbc) service to localhost/127.0.0.1:45791 2024-11-18T22:42:00,174 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd/data/data1/current/BP-749680845-172.17.0.2-1731969608935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:00,175 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:42:00,175 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:42:00,176 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/cluster_0f0fca93-73ab-52fd-f44b-0daa69ff18bd/data/data2/current/BP-749680845-172.17.0.2-1731969608935 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:00,177 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:42:00,218 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6de997b9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:42:00,233 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7a0da00a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:42:00,233 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:42:00,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@380b8195{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:42:00,233 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aee6cb7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/hadoop.log.dir/,STOPPED} 2024-11-18T22:42:00,282 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T22:42:00,358 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T22:42:00,384 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) 
connection to localhost/127.0.0.1:45791 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/3b0ca91ed64a:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45791 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:45791 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/3b0ca91ed64a:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/3b0ca91ed64a:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:45791 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:45791 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45791 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@1b34ea30 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1220 (was 1245), ProcessCount=11 (was 11), AvailableMemoryMB=7191 (was 7625) 2024-11-18T22:42:00,409 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=1220, ProcessCount=11, AvailableMemoryMB=7182 2024-11-18T22:42:00,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T22:42:00,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/hadoop.log.dir so I do NOT create it in target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154 2024-11-18T22:42:00,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c9d5e47b-c602-be98-2b95-8f20ff48a795/hadoop.tmp.dir so I do NOT create it in target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154 2024-11-18T22:42:00,410 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee, deleteOnExit=true 2024-11-18T22:42:00,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T22:42:00,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/test.cache.data in system properties and HBase conf 2024-11-18T22:42:00,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T22:42:00,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/hadoop.log.dir in system properties and HBase conf 2024-11-18T22:42:00,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T22:42:00,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T22:42:00,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T22:42:00,411 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T22:42:00,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:42:00,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:42:00,411 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T22:42:00,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:42:00,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T22:42:00,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T22:42:00,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:42:00,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:42:00,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T22:42:00,412 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/nfs.dump.dir in system properties and HBase conf 2024-11-18T22:42:00,412 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/java.io.tmpdir in system properties and HBase conf 2024-11-18T22:42:00,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:42:00,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T22:42:00,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T22:42:00,444 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:42:00,846 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:00,858 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:42:00,887 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:42:00,887 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:42:00,888 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:42:00,893 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:00,904 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46598b1f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:42:00,905 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@db210e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:42:01,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f317c08{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/java.io.tmpdir/jetty-localhost-42997-hadoop-hdfs-3_4_1-tests_jar-_-any-8463611110889140779/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:42:01,116 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5262ced6{HTTP/1.1, (http/1.1)}{localhost:42997} 2024-11-18T22:42:01,116 INFO [Time-limited test {}] server.Server(415): Started @114916ms 2024-11-18T22:42:01,160 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:42:01,597 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:01,604 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:42:01,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:42:01,613 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:42:01,613 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:42:01,617 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25652898{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:42:01,618 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55633b2d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:42:01,783 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7cf0399{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/java.io.tmpdir/jetty-localhost-46445-hadoop-hdfs-3_4_1-tests_jar-_-any-7655228418630604854/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:01,788 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a560f35{HTTP/1.1, (http/1.1)}{localhost:46445} 2024-11-18T22:42:01,788 INFO [Time-limited test {}] server.Server(415): Started @115588ms 2024-11-18T22:42:01,791 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:42:01,978 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:01,989 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:42:02,008 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:42:02,008 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:42:02,008 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:42:02,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2819a81f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:42:02,013 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72c1d7ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:42:02,270 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4902a927{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/java.io.tmpdir/jetty-localhost-39973-hadoop-hdfs-3_4_1-tests_jar-_-any-16025322668114763006/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:02,271 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b1e120d{HTTP/1.1, (http/1.1)}{localhost:39973} 2024-11-18T22:42:02,271 INFO [Time-limited test {}] server.Server(415): Started @116071ms 2024-11-18T22:42:02,273 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:42:02,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:02,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:03,160 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:42:03,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:03,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:03,867 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee/data/data1/current/BP-342864332-172.17.0.2-1731969720473/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:03,868 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee/data/data2/current/BP-342864332-172.17.0.2-1731969720473/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:03,937 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T22:42:03,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaf6e3dbc96654eec with lease ID 0x3e53bfd93ec78dc4: Processing first storage report for DS-029f44f6-ffda-4001-89bf-7ed663a15a37 from datanode DatanodeRegistration(127.0.0.1:38265, datanodeUuid=0df769e5-50ab-4d76-a3fa-ab359020ce08, infoPort=36889, infoSecurePort=0, ipcPort=33769, storageInfo=lv=-57;cid=testClusterID;nsid=1607928450;c=1731969720473) 2024-11-18T22:42:03,953 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf6e3dbc96654eec with lease ID 0x3e53bfd93ec78dc4: from storage DS-029f44f6-ffda-4001-89bf-7ed663a15a37 node DatanodeRegistration(127.0.0.1:38265, datanodeUuid=0df769e5-50ab-4d76-a3fa-ab359020ce08, infoPort=36889, infoSecurePort=0, ipcPort=33769, storageInfo=lv=-57;cid=testClusterID;nsid=1607928450;c=1731969720473), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:03,953 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xaf6e3dbc96654eec with lease ID 0x3e53bfd93ec78dc4: Processing first storage report for DS-ad94b9cf-4352-4867-9ec0-a050f92df1bb from datanode DatanodeRegistration(127.0.0.1:38265, datanodeUuid=0df769e5-50ab-4d76-a3fa-ab359020ce08, infoPort=36889, infoSecurePort=0, ipcPort=33769, storageInfo=lv=-57;cid=testClusterID;nsid=1607928450;c=1731969720473) 2024-11-18T22:42:03,953 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaf6e3dbc96654eec with lease ID 0x3e53bfd93ec78dc4: from storage DS-ad94b9cf-4352-4867-9ec0-a050f92df1bb node DatanodeRegistration(127.0.0.1:38265, datanodeUuid=0df769e5-50ab-4d76-a3fa-ab359020ce08, infoPort=36889, infoSecurePort=0, ipcPort=33769, storageInfo=lv=-57;cid=testClusterID;nsid=1607928450;c=1731969720473), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:03,994 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee/data/data3/current/BP-342864332-172.17.0.2-1731969720473/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:03,997 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee/data/data4/current/BP-342864332-172.17.0.2-1731969720473/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:04,069 WARN [Thread-438 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T22:42:04,080 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd63f0cc0e0b442fb with lease ID 0x3e53bfd93ec78dc5: Processing first storage report for DS-cfc8c9c5-a252-4922-a531-d582b48ca8bd from datanode DatanodeRegistration(127.0.0.1:37783, datanodeUuid=d67118bd-d782-4faf-8ed1-dbf02fd0787d, infoPort=41993, infoSecurePort=0, ipcPort=41927, storageInfo=lv=-57;cid=testClusterID;nsid=1607928450;c=1731969720473) 2024-11-18T22:42:04,081 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd63f0cc0e0b442fb with lease ID 0x3e53bfd93ec78dc5: from storage DS-cfc8c9c5-a252-4922-a531-d582b48ca8bd node DatanodeRegistration(127.0.0.1:37783, datanodeUuid=d67118bd-d782-4faf-8ed1-dbf02fd0787d, infoPort=41993, infoSecurePort=0, ipcPort=41927, storageInfo=lv=-57;cid=testClusterID;nsid=1607928450;c=1731969720473), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:04,081 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd63f0cc0e0b442fb with lease ID 0x3e53bfd93ec78dc5: Processing first storage report for DS-e4307cbd-35c2-41f4-a3aa-2fb1c6b7e28d from datanode DatanodeRegistration(127.0.0.1:37783, datanodeUuid=d67118bd-d782-4faf-8ed1-dbf02fd0787d, infoPort=41993, infoSecurePort=0, ipcPort=41927, storageInfo=lv=-57;cid=testClusterID;nsid=1607928450;c=1731969720473) 2024-11-18T22:42:04,081 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd63f0cc0e0b442fb with lease ID 0x3e53bfd93ec78dc5: from storage DS-e4307cbd-35c2-41f4-a3aa-2fb1c6b7e28d node DatanodeRegistration(127.0.0.1:37783, datanodeUuid=d67118bd-d782-4faf-8ed1-dbf02fd0787d, infoPort=41993, infoSecurePort=0, ipcPort=41927, storageInfo=lv=-57;cid=testClusterID;nsid=1607928450;c=1731969720473), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:04,169 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154 2024-11-18T22:42:04,211 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee/zookeeper_0, clientPort=58703, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T22:42:04,217 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58703 2024-11-18T22:42:04,218 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:04,220 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:04,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:42:04,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:42:04,290 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb with version=8 2024-11-18T22:42:04,290 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/hbase-staging 2024-11-18T22:42:04,294 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:42:04,294 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:04,294 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:04,294 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:42:04,294 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:04,294 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:42:04,294 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T22:42:04,295 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:42:04,296 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38467 2024-11-18T22:42:04,299 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38467 connecting to ZooKeeper ensemble=127.0.0.1:58703 2024-11-18T22:42:04,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:384670x0, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-18T22:42:04,408 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38467-0x101501caac50000 connected 2024-11-18T22:42:04,652 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:04,655 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:04,673 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:42:04,673 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb, hbase.cluster.distributed=false 2024-11-18T22:42:04,680 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:42:04,686 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38467 2024-11-18T22:42:04,689 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38467 2024-11-18T22:42:04,690 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38467 2024-11-18T22:42:04,699 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38467 2024-11-18T22:42:04,700 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38467 2024-11-18T22:42:04,734 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:42:04,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:04,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:04,734 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:42:04,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:04,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:42:04,734 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T22:42:04,735 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:42:04,740 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37665 2024-11-18T22:42:04,743 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37665 connecting to ZooKeeper ensemble=127.0.0.1:58703 2024-11-18T22:42:04,744 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:04,748 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:04,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:376650x0, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:42:04,828 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37665-0x101501caac50001 connected 2024-11-18T22:42:04,834 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:42:04,835 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T22:42:04,839 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T22:42:04,841 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T22:42:04,848 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:42:04,851 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37665 2024-11-18T22:42:04,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37665 2024-11-18T22:42:04,856 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37665 2024-11-18T22:42:04,857 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37665 2024-11-18T22:42:04,858 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37665 2024-11-18T22:42:04,878 DEBUG [M:0;3b0ca91ed64a:38467 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3b0ca91ed64a:38467 2024-11-18T22:42:04,884 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3b0ca91ed64a,38467,1731969724293 2024-11-18T22:42:04,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:42:04,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:42:04,981 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3b0ca91ed64a,38467,1731969724293 2024-11-18T22:42:05,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T22:42:05,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:05,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:05,113 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T22:42:05,114 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3b0ca91ed64a,38467,1731969724293 from backup master directory 2024-11-18T22:42:05,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:42:05,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3b0ca91ed64a,38467,1731969724293 2024-11-18T22:42:05,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:42:05,224 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
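The active-master registration above is driven entirely by ZooKeeper watches on znodes under /hbase (backup-masters, master, running): the ZKWatcher entries record NodeCreated, NodeDeleted and NodeChildrenChanged events firing as the master claims its znodes. As a rough, standalone illustration of that watch mechanism (not the test's own code), the sketch below uses the plain ZooKeeper client against the quorum address from the log; the class name, one-minute sleep and printed format are illustrative assumptions.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatch {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log above (127.0.0.1:58703); session timeout and the rest are illustrative.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58703", 30_000,
        (WatchedEvent e) -> System.out.println("event=" + e.getType() + " path=" + e.getPath()));
    // Registers a one-shot watch: a NodeCreated or NodeDeleted on /hbase/master fires the callback,
    // mirroring the ZKWatcher events recorded in the log.
    zk.exists("/hbase/master", true);
    Thread.sleep(60_000);
    zk.close();
  }
}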
2024-11-18T22:42:05,224 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3b0ca91ed64a,38467,1731969724293 2024-11-18T22:42:05,232 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/hbase.id] with ID: cbad9f65-694f-4da8-92f2-1fd5ff1a7a1b 2024-11-18T22:42:05,232 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/.tmp/hbase.id 2024-11-18T22:42:05,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:42:05,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:42:05,243 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/.tmp/hbase.id]:[hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/hbase.id] 2024-11-18T22:42:05,259 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:05,259 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T22:42:05,262 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
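The cluster ID bootstrap just logged follows a write-to-temporary-then-rename pattern: FSUtils writes .tmp/hbase.id first and then moves it to hbase.id, so a reader never sees a partially written file. A minimal sketch of the same pattern with the Hadoop FileSystem API follows; the paths, class name and use of the default filesystem are assumptions for illustration, not the code path HBase runs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRename {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Illustrative paths; the test writes under hdfs://localhost:43985/user/jenkins/test-data/... as shown above.
    Path tmp = new Path("/user/jenkins/demo/.tmp/hbase.id");
    Path dst = new Path("/user/jenkins/demo/hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeUTF("cbad9f65-694f-4da8-92f2-1fd5ff1a7a1b"); // the cluster ID value from the log
    }
    // On HDFS, rename is atomic, so the target file either exists fully written or not at all.
    fs.rename(tmp, dst);
  }
}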
2024-11-18T22:42:05,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:05,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:05,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:42:05,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:42:05,310 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:42:05,311 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T22:42:05,312 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:42:05,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:42:05,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:42:05,321 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store 2024-11-18T22:42:05,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:42:05,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:42:05,330 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:42:05,331 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:42:05,331 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:42:05,331 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:42:05,331 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:42:05,331 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:42:05,331 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
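The master:store schema dumped above (families info, proc, rs and state with their versions, encodings, bloom filters and block sizes) is easier to read when expressed as builder code. The sketch below builds a descriptor with the same settings for the first two families using the public HBase client API; the table name demo:store and the surrounding class are placeholders, and this is not the internal code path MasterRegion itself uses.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreLikeDescriptor {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "store"))
        // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks (as in the log)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8192)
            .build())
        // 'proc': single version, ROW bloom, defaults elsewhere (as in the log)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
    System.out.println(td);
  }
}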
2024-11-18T22:42:05,331 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969725331Disabling compacts and flushes for region at 1731969725331Disabling writes for close at 1731969725331Writing region close event to WAL at 1731969725331Closed at 1731969725331 2024-11-18T22:42:05,332 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/.initializing 2024-11-18T22:42:05,333 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/WALs/3b0ca91ed64a,38467,1731969724293 2024-11-18T22:42:05,337 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C38467%2C1731969724293, suffix=, logDir=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/WALs/3b0ca91ed64a,38467,1731969724293, archiveDir=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/oldWALs, maxLogs=10 2024-11-18T22:42:05,337 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C38467%2C1731969724293.1731969725337 2024-11-18T22:42:05,344 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/WALs/3b0ca91ed64a,38467,1731969724293/3b0ca91ed64a%2C38467%2C1731969724293.1731969725337 2024-11-18T22:42:05,348 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41993:41993),(127.0.0.1/127.0.0.1:36889:36889)] 2024-11-18T22:42:05,352 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:42:05,352 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:42:05,352 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,353 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,356 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T22:42:05,356 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:05,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:05,357 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T22:42:05,359 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:05,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:42:05,359 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T22:42:05,361 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:05,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:42:05,362 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,364 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T22:42:05,364 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:05,365 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:42:05,365 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,366 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,367 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,368 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,368 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,369 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T22:42:05,371 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:05,374 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:42:05,375 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821132, jitterRate=0.04412400722503662}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T22:42:05,376 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731969725353Initializing all the Stores at 1731969725354 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969725354Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969725354Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969725354Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969725354Cleaning up temporary data from old regions at 1731969725368 (+14 ms)Region opened successfully at 1731969725376 (+8 ms) 2024-11-18T22:42:05,376 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T22:42:05,382 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53aa6c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:42:05,383 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T22:42:05,383 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T22:42:05,384 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T22:42:05,384 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T22:42:05,385 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-18T22:42:05,386 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T22:42:05,386 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T22:42:05,389 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T22:42:05,390 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T22:42:05,484 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T22:42:05,485 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T22:42:05,486 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T22:42:05,531 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T22:42:05,532 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T22:42:05,533 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T22:42:05,667 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T22:42:05,669 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T22:42:05,688 DEBUG 
[master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T22:42:05,692 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T22:42:05,777 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T22:42:05,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:42:05,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:05,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:42:05,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:05,857 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3b0ca91ed64a,38467,1731969724293, sessionid=0x101501caac50000, setting cluster-up flag (Was=false) 2024-11-18T22:42:05,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:05,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:06,030 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T22:42:06,032 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,38467,1731969724293 2024-11-18T22:42:06,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:06,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:06,083 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T22:42:06,085 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,38467,1731969724293 2024-11-18T22:42:06,088 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T22:42:06,098 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T22:42:06,098 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T22:42:06,098 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T22:42:06,099 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3b0ca91ed64a,38467,1731969724293 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T22:42:06,117 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:42:06,117 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:42:06,117 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:42:06,117 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:42:06,118 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3b0ca91ed64a:0, corePoolSize=10, maxPoolSize=10 2024-11-18T22:42:06,118 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:06,118 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:42:06,118 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T22:42:06,135 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731969756134 2024-11-18T22:42:06,135 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T22:42:06,135 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T22:42:06,135 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T22:42:06,135 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T22:42:06,136 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T22:42:06,136 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T22:42:06,136 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:42:06,136 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T22:42:06,138 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
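The LogsCleaner entry above is one of many background chores the master registers on its ChoreService (several more appear further down). As a rough illustration of that pattern, here is a minimal, self-contained sketch of a custom ScheduledChore driven by a ChoreService. The 600000 ms period and the "is enabled" log line mirror what is shown above; the class name, chore name and the trivial Stoppable are made up, and ScheduledChore/ChoreService are HBase-internal classes, so this is illustrative rather than a supported client API.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Trivial Stoppable; a real chore stops when its owning service shuts down.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped = false;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };

        // Fires every 600000 ms, like the LogsCleaner chore enabled in the log above.
        ScheduledChore chore = new ScheduledChore("LogsCleanerLikeChore", stopper, 600000) {
          @Override protected void chore() {
            System.out.println("periodic cleanup work would run here");
          }
        };

        ChoreService service = new ChoreService("sketch");  // thread pool that drives chores
        service.scheduleChore(chore);                       // logs "... is enabled." as above
        Thread.sleep(2000);
        service.shutdown();
      }
    }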
2024-11-18T22:42:06,139 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T22:42:06,139 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T22:42:06,139 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T22:42:06,140 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T22:42:06,140 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T22:42:06,143 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:06,143 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T22:42:06,144 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969726140,5,FailOnTimeoutGroup] 2024-11-18T22:42:06,148 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969726144,5,FailOnTimeoutGroup] 2024-11-18T22:42:06,148 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,148 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T22:42:06,149 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,149 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,192 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(746): ClusterId : cbad9f65-694f-4da8-92f2-1fd5ff1a7a1b 2024-11-18T22:42:06,192 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T22:42:06,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:42:06,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:42:06,222 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T22:42:06,222 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb 2024-11-18T22:42:06,280 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T22:42:06,280 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T22:42:06,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37783 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:42:06,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:42:06,291 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:42:06,301 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:42:06,315 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:42:06,315 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:06,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:06,318 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:42:06,322 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:42:06,322 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:06,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:06,325 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:42:06,330 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:42:06,331 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:06,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:06,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:42:06,354 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:42:06,354 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:06,355 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:06,356 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:42:06,363 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/1588230740 2024-11-18T22:42:06,368 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/1588230740 2024-11-18T22:42:06,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:42:06,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:42:06,376 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T22:42:06,378 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:42:06,391 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:42:06,400 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691069, jitterRate=-0.12126076221466064}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:42:06,401 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731969726291Initializing all the Stores at 1731969726297 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969726297Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969726301 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969726301Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969726301Cleaning up temporary data from old regions at 1731969726374 (+73 ms)Region opened successfully at 1731969726401 (+27 ms) 2024-11-18T22:42:06,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:42:06,402 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:42:06,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-11-18T22:42:06,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:42:06,402 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:42:06,403 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:42:06,404 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969726402Disabling compacts and flushes for region at 1731969726402Disabling writes for close at 1731969726402Writing region close event to WAL at 1731969726403 (+1 ms)Closed at 1731969726403 2024-11-18T22:42:06,407 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:42:06,408 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T22:42:06,408 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T22:42:06,417 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:42:06,422 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T22:42:06,490 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T22:42:06,491 DEBUG [RS:0;3b0ca91ed64a:37665 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1de6cd21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:42:06,512 DEBUG [RS:0;3b0ca91ed64a:37665 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3b0ca91ed64a:37665 2024-11-18T22:42:06,512 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T22:42:06,512 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T22:42:06,513 DEBUG [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(832): About to register with Master. 
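The hbase:meta table descriptor printed above (families info, ns, rep_barrier and table, with ROW_INDEX_V1 encoding, ROWCOL bloom filters, IN_MEMORY=true and an 8 KB block size for most families) is built internally by InitMetaProcedure, but the same attributes can be expressed with the public descriptor builders. A minimal sketch using the current HBase client API; the table name "my_table" is made up and only an 'info'-like family is shown.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) throws Exception {
        // Mirrors the 'info' family attributes from the logged descriptor:
        // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true, ROW_INDEX_V1 encoding, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("my_table"))   // made-up table name
            .setColumnFamily(info)
            // hbase:meta also carries this coprocessor, per the descriptor logged above.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();

        System.out.println(td);
      }
    }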
2024-11-18T22:42:06,514 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(2659): reportForDuty to master=3b0ca91ed64a,38467,1731969724293 with port=37665, startcode=1731969724733 2024-11-18T22:42:06,515 DEBUG [RS:0;3b0ca91ed64a:37665 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T22:42:06,523 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58483, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T22:42:06,524 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38467 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3b0ca91ed64a,37665,1731969724733 2024-11-18T22:42:06,524 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38467 {}] master.ServerManager(517): Registering regionserver=3b0ca91ed64a,37665,1731969724733 2024-11-18T22:42:06,531 DEBUG [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb 2024-11-18T22:42:06,531 DEBUG [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43985 2024-11-18T22:42:06,531 DEBUG [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T22:42:06,572 WARN [3b0ca91ed64a:38467 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T22:42:06,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:42:06,671 DEBUG [RS:0;3b0ca91ed64a:37665 {}] zookeeper.ZKUtil(111): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3b0ca91ed64a,37665,1731969724733 2024-11-18T22:42:06,671 WARN [RS:0;3b0ca91ed64a:37665 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
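The ZKUtil and RegionServerTracker entries above show the region server publishing an ephemeral znode under /hbase/rs and the master picking it up. A small sketch of inspecting that znode directly with the plain ZooKeeper client; the quorum address and base znode are taken from this log, everything else is illustrative.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServersSketch {
      public static void main(String[] args) throws Exception {
        // Quorum taken from the log above (127.0.0.1:58703, baseZNode=/hbase).
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58703", 30000, (WatchedEvent e) -> {
          if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // Each live region server registers an ephemeral child under /hbase/rs,
        // e.g. "3b0ca91ed64a,37665,1731969724733" as seen in the log.
        List<String> servers = zk.getChildren("/hbase/rs", false);
        servers.forEach(System.out::println);
        zk.close();
      }
    }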
2024-11-18T22:42:06,671 INFO [RS:0;3b0ca91ed64a:37665 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:42:06,672 DEBUG [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/WALs/3b0ca91ed64a,37665,1731969724733 2024-11-18T22:42:06,672 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3b0ca91ed64a,37665,1731969724733] 2024-11-18T22:42:06,682 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T22:42:06,686 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T22:42:06,687 INFO [RS:0;3b0ca91ed64a:37665 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T22:42:06,687 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,688 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T22:42:06,689 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T22:42:06,690 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,690 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:06,690 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:06,690 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:06,690 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:06,690 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:06,690 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:42:06,691 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:06,691 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:06,691 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3b0ca91ed64a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T22:42:06,691 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:06,691 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:06,691 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:06,691 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:42:06,691 DEBUG [RS:0;3b0ca91ed64a:37665 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:42:06,700 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,700 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,700 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,701 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,701 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,701 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,37665,1731969724733-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:42:06,725 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T22:42:06,725 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,37665,1731969724733-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,725 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:06,725 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.Replication(171): 3b0ca91ed64a,37665,1731969724733 started 2024-11-18T22:42:06,751 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
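The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit; both are derived from the heap size and the global memstore fraction. A minimal sketch of how the same figures could be computed from configuration; the property names are the standard memstore-sizing keys and should be treated as assumptions here, not values read from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Fraction of the region server heap reserved for all memstores (default 0.4),
        // and the lower-mark fraction of that limit at which flushing backs off (default 0.95).
        float globalFraction = conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f);
        float lowerLimit = conf.getFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);

        long heapBytes = Runtime.getRuntime().maxMemory();
        long globalLimit = (long) (heapBytes * globalFraction);
        long lowMark = (long) (globalLimit * lowerLimit);

        // With a roughly 2.2 GB heap this reproduces the figures in the log:
        // globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M (0.95 * 880 M).
        System.out.printf("globalMemStoreLimit=%d MB, lowMark=%d MB%n",
            globalLimit >> 20, lowMark >> 20);
      }
    }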
2024-11-18T22:42:06,752 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(1482): Serving as 3b0ca91ed64a,37665,1731969724733, RpcServer on 3b0ca91ed64a/172.17.0.2:37665, sessionid=0x101501caac50001 2024-11-18T22:42:06,752 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T22:42:06,752 DEBUG [RS:0;3b0ca91ed64a:37665 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3b0ca91ed64a,37665,1731969724733 2024-11-18T22:42:06,752 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,37665,1731969724733' 2024-11-18T22:42:06,752 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T22:42:06,756 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T22:42:06,764 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T22:42:06,764 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T22:42:06,764 DEBUG [RS:0;3b0ca91ed64a:37665 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3b0ca91ed64a,37665,1731969724733 2024-11-18T22:42:06,764 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,37665,1731969724733' 2024-11-18T22:42:06,764 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T22:42:06,772 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T22:42:06,788 DEBUG [RS:0;3b0ca91ed64a:37665 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T22:42:06,788 INFO [RS:0;3b0ca91ed64a:37665 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T22:42:06,788 INFO [RS:0;3b0ca91ed64a:37665 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
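The flush-table-proc and online-snapshot members that just started on the region server are the ZK-coordinated procedures behind client-triggered flushes and table snapshots. A minimal client-side sketch of the Admin calls that exercise them; the table and snapshot names are made up, and a running cluster reachable through the local configuration is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndSnapshotSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("my_table");   // hypothetical table name
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // These calls drive the two ZK-coordinated procedures whose members the
          // region server just started: flush-table-proc and online-snapshot.
          admin.flush(table);
          admin.snapshot("my_table_snapshot", table);      // hypothetical snapshot name
        }
      }
    }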
2024-11-18T22:42:06,895 INFO [RS:0;3b0ca91ed64a:37665 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C37665%2C1731969724733, suffix=, logDir=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/WALs/3b0ca91ed64a,37665,1731969724733, archiveDir=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/oldWALs, maxLogs=32 2024-11-18T22:42:06,898 INFO [RS:0;3b0ca91ed64a:37665 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C37665%2C1731969724733.1731969726898 2024-11-18T22:42:06,962 INFO [RS:0;3b0ca91ed64a:37665 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/WALs/3b0ca91ed64a,37665,1731969724733/3b0ca91ed64a%2C37665%2C1731969724733.1731969726898 2024-11-18T22:42:06,983 DEBUG [RS:0;3b0ca91ed64a:37665 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41993:41993),(127.0.0.1/127.0.0.1:36889:36889)] 2024-11-18T22:42:07,074 DEBUG [3b0ca91ed64a:38467 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T22:42:07,076 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3b0ca91ed64a,37665,1731969724733 2024-11-18T22:42:07,088 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,37665,1731969724733, state=OPENING 2024-11-18T22:42:07,140 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T22:42:07,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:07,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:07,186 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:42:07,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,37665,1731969724733}] 2024-11-18T22:42:07,196 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:42:07,197 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:42:07,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:42:07,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T22:42:07,237 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-18T22:42:07,351 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T22:42:07,358 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52349, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T22:42:07,366 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T22:42:07,367 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:42:07,370 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C37665%2C1731969724733.meta, suffix=.meta, logDir=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/WALs/3b0ca91ed64a,37665,1731969724733, archiveDir=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/oldWALs, maxLogs=32 2024-11-18T22:42:07,373 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C37665%2C1731969724733.meta.1731969727373.meta 2024-11-18T22:42:07,415 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/WALs/3b0ca91ed64a,37665,1731969724733/3b0ca91ed64a%2C37665%2C1731969724733.meta.1731969727373.meta 2024-11-18T22:42:07,431 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41993:41993),(127.0.0.1/127.0.0.1:36889:36889)] 2024-11-18T22:42:07,440 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:42:07,441 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T22:42:07,441 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T22:42:07,441 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
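The AbstractFSWAL line above summarizes the sizing of the newly created meta WAL: blocksize=256 MB, rollsize=128 MB and maxLogs=32, where the roll size is the block size scaled by a roll multiplier. A short sketch reading the corresponding settings; the property keys are the commonly used WAL tuning keys and are assumptions here, not values taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Block size of each WAL file, the fraction of it at which the WAL rolls,
        // and the number of WALs kept before forcing flushes (32 in the log above).
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 0L);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);

        System.out.println("roll size = blocksize * multiplier = " + (long) (blockSize * multiplier));
        System.out.println("maxLogs = " + maxLogs);
      }
    }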
2024-11-18T22:42:07,441 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T22:42:07,441 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:42:07,442 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T22:42:07,442 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T22:42:07,444 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:42:07,446 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:42:07,446 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:07,447 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:07,447 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:42:07,449 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:42:07,449 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:07,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:07,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:42:07,457 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:42:07,458 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:07,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:07,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:42:07,460 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:42:07,461 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:07,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
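Each store opened above prints the same CompactionConfiguration: at least 3 and at most 10 files per minor compaction, selection ratio 1.2 (5.0 off-peak), and a weekly major compaction with 0.5 jitter. A minimal sketch of reading those knobs from configuration; the key names are the standard compaction settings and are assumptions here, while the defaults shown match the logged values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2, off-peak ratio=5.0,
        // major period=604800000 ms (7 days), major jitter=0.5, as printed above.
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
        double ratio = conf.getDouble("hbase.hstore.compaction.ratio", 1.2);
        double offPeakRatio = conf.getDouble("hbase.hstore.compaction.ratio.offpeak", 5.0);
        long majorPeriodMs = conf.getLong("hbase.hregion.majorcompaction", 604800000L);
        double majorJitter = conf.getDouble("hbase.hregion.majorcompaction.jitter", 0.5);

        System.out.printf("files per minor compaction: %d..%d, ratio %.1f (off-peak %.1f)%n",
            minFiles, maxFiles, ratio, offPeakRatio);
        System.out.printf("major compaction every %d ms +/- %.0f%% jitter%n",
            majorPeriodMs, majorJitter * 100);
      }
    }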
2024-11-18T22:42:07,462 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:42:07,463 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/1588230740 2024-11-18T22:42:07,464 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/1588230740 2024-11-18T22:42:07,466 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:42:07,466 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:42:07,467 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T22:42:07,471 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:42:07,476 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821322, jitterRate=0.04436565935611725}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:42:07,476 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T22:42:07,485 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731969727442Writing region info on filesystem at 1731969727442Initializing all the Stores at 1731969727443 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969727443Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969727444 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969727444Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969727444Cleaning up temporary data from old regions at 1731969727466 (+22 ms)Running coprocessor post-open hooks at 1731969727476 (+10 ms)Region opened successfully at 1731969727484 (+8 ms) 2024-11-18T22:42:07,492 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731969727351 2024-11-18T22:42:07,496 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T22:42:07,496 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T22:42:07,498 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3b0ca91ed64a,37665,1731969724733 2024-11-18T22:42:07,499 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,37665,1731969724733, state=OPEN 2024-11-18T22:42:07,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:42:07,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:42:07,676 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:42:07,676 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,37665,1731969724733 2024-11-18T22:42:07,676 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:42:07,681 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T22:42:07,681 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,37665,1731969724733 in 490 msec 2024-11-18T22:42:07,685 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T22:42:07,685 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.2730 sec 2024-11-18T22:42:07,688 DEBUG [PEWorker-1 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:42:07,688 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T22:42:07,691 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:42:07,691 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,37665,1731969724733, seqNum=-1] 2024-11-18T22:42:07,692 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:42:07,694 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35711, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:42:07,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.6130 sec 2024-11-18T22:42:07,704 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731969727704, completionTime=-1 2024-11-18T22:42:07,704 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T22:42:07,704 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T22:42:07,706 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T22:42:07,706 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731969787706 2024-11-18T22:42:07,706 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731969847706 2024-11-18T22:42:07,706 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T22:42:07,707 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38467,1731969724293-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:07,707 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38467,1731969724293-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:07,707 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38467,1731969724293-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:07,707 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3b0ca91ed64a:38467, period=300000, unit=MILLISECONDS is enabled. 
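At this point InitMetaProcedure has created the 'default' and 'hbase' namespaces and the hbase:meta location has been published. A minimal client sketch that reads both back through the public API, assuming a reachable cluster; it is a client-side analogue of the registry lookup PEWorker-1 logged above, not the internal code path itself.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaAndNamespacesSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {

          // InitMetaProcedure created the 'default' and 'hbase' namespaces; list them back.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }

          // Where is hbase:meta,,1 currently hosted?
          HRegionLocation loc = meta.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("meta is on " + loc.getServerName());
        }
      }
    }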
2024-11-18T22:42:07,707 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:07,707 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:07,709 DEBUG [master/3b0ca91ed64a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T22:42:07,712 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.488sec 2024-11-18T22:42:07,712 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T22:42:07,712 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T22:42:07,712 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T22:42:07,712 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T22:42:07,712 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T22:42:07,712 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38467,1731969724293-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:42:07,713 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38467,1731969724293-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T22:42:07,716 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T22:42:07,716 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T22:42:07,716 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38467,1731969724293-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T22:42:07,775 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60e4d7ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:42:07,775 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3b0ca91ed64a,38467,-1 for getting cluster id 2024-11-18T22:42:07,775 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T22:42:07,777 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cbad9f65-694f-4da8-92f2-1fd5ff1a7a1b' 2024-11-18T22:42:07,778 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T22:42:07,778 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cbad9f65-694f-4da8-92f2-1fd5ff1a7a1b" 2024-11-18T22:42:07,778 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e7fafbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:42:07,778 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3b0ca91ed64a,38467,-1] 2024-11-18T22:42:07,778 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T22:42:07,779 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:42:07,780 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39304, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T22:42:07,781 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69958721, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:42:07,782 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:42:07,783 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,37665,1731969724733, seqNum=-1] 2024-11-18T22:42:07,783 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:42:07,785 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48794, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:42:07,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3b0ca91ed64a,38467,1731969724293 2024-11-18T22:42:07,788 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:07,792 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T22:42:07,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T22:42:07,792 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T22:42:07,793 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:42:07,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:42:07,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:42:07,793 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T22:42:07,793 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T22:42:07,793 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1705327446, stopped=false 2024-11-18T22:42:07,793 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3b0ca91ed64a,38467,1731969724293 2024-11-18T22:42:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:42:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:42:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:07,809 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:42:07,809 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T22:42:07,809 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:42:07,809 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:42:07,809 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:42:07,809 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:42:07,810 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3b0ca91ed64a,37665,1731969724733' ***** 2024-11-18T22:42:07,810 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T22:42:07,810 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T22:42:07,810 INFO [RS:0;3b0ca91ed64a:37665 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T22:42:07,810 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T22:42:07,810 INFO [RS:0;3b0ca91ed64a:37665 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T22:42:07,810 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(959): stopping server 3b0ca91ed64a,37665,1731969724733 2024-11-18T22:42:07,810 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:42:07,810 INFO [RS:0;3b0ca91ed64a:37665 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3b0ca91ed64a:37665. 2024-11-18T22:42:07,810 DEBUG [RS:0;3b0ca91ed64a:37665 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:42:07,810 DEBUG [RS:0;3b0ca91ed64a:37665 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:42:07,811 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-18T22:42:07,811 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T22:42:07,811 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T22:42:07,811 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T22:42:07,811 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-18T22:42:07,811 DEBUG [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-18T22:42:07,811 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:42:07,811 DEBUG [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:42:07,811 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:42:07,811 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:42:07,811 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:42:07,811 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:42:07,812 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-18T22:42:07,831 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/1588230740/.tmp/ns/95752e861e1245c0826c4e0ce29b06af is 43, key is default/ns:d/1731969727696/Put/seqid=0 2024-11-18T22:42:07,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741835_1011 (size=5153) 2024-11-18T22:42:07,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741835_1011 (size=5153) 2024-11-18T22:42:07,838 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/1588230740/.tmp/ns/95752e861e1245c0826c4e0ce29b06af 2024-11-18T22:42:07,848 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/1588230740/.tmp/ns/95752e861e1245c0826c4e0ce29b06af as hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/1588230740/ns/95752e861e1245c0826c4e0ce29b06af 2024-11-18T22:42:07,861 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/1588230740/ns/95752e861e1245c0826c4e0ce29b06af, entries=2, sequenceid=6, filesize=5.0 K 2024-11-18T22:42:07,864 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 53ms, sequenceid=6, compaction requested=false 2024-11-18T22:42:07,865 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T22:42:07,888 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-18T22:42:07,890 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:42:07,891 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:42:07,891 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969727811Running coprocessor pre-close hooks at 1731969727811Disabling compacts and flushes for region at 1731969727811Disabling writes for close at 1731969727811Obtaining lock to block concurrent updates at 1731969727812 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731969727812Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731969727812Flushing stores of hbase:meta,,1.1588230740 at 1731969727813 (+1 ms)Flushing 1588230740/ns: creating writer at 1731969727814 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731969727830 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731969727830Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ab74e07: reopening flushed file at 1731969727847 (+17 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 53ms, sequenceid=6, compaction requested=false at 1731969727865 (+18 ms)Writing region close event to WAL at 1731969727867 (+2 ms)Running coprocessor post-close hooks at 1731969727890 (+23 ms)Closed at 1731969727891 (+1 ms) 2024-11-18T22:42:07,891 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T22:42:08,011 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(976): stopping server 3b0ca91ed64a,37665,1731969724733; all regions closed. 
2024-11-18T22:42:08,012 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,012 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,013 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,016 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,016 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741834_1010 (size=1152) 2024-11-18T22:42:08,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741834_1010 (size=1152) 2024-11-18T22:42:08,427 DEBUG [RS:0;3b0ca91ed64a:37665 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/oldWALs 2024-11-18T22:42:08,427 INFO [RS:0;3b0ca91ed64a:37665 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C37665%2C1731969724733.meta:.meta(num 1731969727373) 2024-11-18T22:42:08,427 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,428 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,428 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,428 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,428 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741833_1009 (size=93) 2024-11-18T22:42:08,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741833_1009 (size=93) 2024-11-18T22:42:08,443 DEBUG [RS:0;3b0ca91ed64a:37665 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/oldWALs 2024-11-18T22:42:08,443 INFO [RS:0;3b0ca91ed64a:37665 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C37665%2C1731969724733:(num 1731969726898) 2024-11-18T22:42:08,443 DEBUG [RS:0;3b0ca91ed64a:37665 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:42:08,443 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:42:08,443 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:42:08,443 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.ChoreService(370): Chore service for: regionserver/3b0ca91ed64a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T22:42:08,444 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:42:08,444 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T22:42:08,444 INFO [RS:0;3b0ca91ed64a:37665 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37665 2024-11-18T22:42:08,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3b0ca91ed64a,37665,1731969724733 2024-11-18T22:42:08,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:42:08,451 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:42:08,461 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3b0ca91ed64a,37665,1731969724733] 2024-11-18T22:42:08,472 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3b0ca91ed64a,37665,1731969724733 already deleted, retry=false 2024-11-18T22:42:08,472 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3b0ca91ed64a,37665,1731969724733 expired; onlineServers=0 2024-11-18T22:42:08,472 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3b0ca91ed64a,38467,1731969724293' ***** 2024-11-18T22:42:08,472 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T22:42:08,472 INFO [M:0;3b0ca91ed64a:38467 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:42:08,472 INFO [M:0;3b0ca91ed64a:38467 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:42:08,472 DEBUG [M:0;3b0ca91ed64a:38467 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T22:42:08,473 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T22:42:08,473 DEBUG [M:0;3b0ca91ed64a:38467 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T22:42:08,473 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969726140 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969726140,5,FailOnTimeoutGroup] 2024-11-18T22:42:08,473 INFO [M:0;3b0ca91ed64a:38467 {}] hbase.ChoreService(370): Chore service for: master/3b0ca91ed64a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T22:42:08,473 INFO [M:0;3b0ca91ed64a:38467 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:42:08,473 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969726144 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969726144,5,FailOnTimeoutGroup] 2024-11-18T22:42:08,473 DEBUG [M:0;3b0ca91ed64a:38467 {}] master.HMaster(1795): Stopping service threads 2024-11-18T22:42:08,473 INFO [M:0;3b0ca91ed64a:38467 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T22:42:08,473 INFO [M:0;3b0ca91ed64a:38467 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:42:08,474 INFO [M:0;3b0ca91ed64a:38467 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T22:42:08,474 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T22:42:08,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T22:42:08,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:08,483 DEBUG [M:0;3b0ca91ed64a:38467 {}] zookeeper.ZKUtil(347): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T22:42:08,483 WARN [M:0;3b0ca91ed64a:38467 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T22:42:08,484 INFO [M:0;3b0ca91ed64a:38467 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/.lastflushedseqids 2024-11-18T22:42:08,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741836_1012 (size=99) 2024-11-18T22:42:08,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741836_1012 (size=99) 2024-11-18T22:42:08,497 INFO [M:0;3b0ca91ed64a:38467 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T22:42:08,498 INFO [M:0;3b0ca91ed64a:38467 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T22:42:08,498 DEBUG [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:42:08,498 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:42:08,498 DEBUG [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:42:08,498 DEBUG [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:42:08,498 DEBUG [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:42:08,498 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-18T22:42:08,534 DEBUG [M:0;3b0ca91ed64a:38467 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/208d87ddca544716ade9fa0451941779 is 82, key is hbase:meta,,1/info:regioninfo/1731969727497/Put/seqid=0 2024-11-18T22:42:08,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741837_1013 (size=5672) 2024-11-18T22:42:08,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741837_1013 (size=5672) 2024-11-18T22:42:08,561 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/208d87ddca544716ade9fa0451941779 2024-11-18T22:42:08,562 INFO [RS:0;3b0ca91ed64a:37665 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:42:08,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:42:08,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37665-0x101501caac50001, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:42:08,562 INFO [RS:0;3b0ca91ed64a:37665 {}] regionserver.HRegionServer(1031): Exiting; stopping=3b0ca91ed64a,37665,1731969724733; zookeeper connection closed. 
2024-11-18T22:42:08,568 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@62d8322 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@62d8322 2024-11-18T22:42:08,572 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T22:42:08,601 DEBUG [M:0;3b0ca91ed64a:38467 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d6a2131c8361490c8fbe129da6785406 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731969727703/Put/seqid=0 2024-11-18T22:42:08,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741838_1014 (size=5275) 2024-11-18T22:42:08,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741838_1014 (size=5275) 2024-11-18T22:42:08,642 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d6a2131c8361490c8fbe129da6785406 2024-11-18T22:42:08,675 DEBUG [M:0;3b0ca91ed64a:38467 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4be451ac3d024fb88b9d668310920ac8 is 69, key is 3b0ca91ed64a,37665,1731969724733/rs:state/1731969726524/Put/seqid=0 2024-11-18T22:42:08,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741839_1015 (size=5156) 2024-11-18T22:42:08,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741839_1015 (size=5156) 2024-11-18T22:42:08,687 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4be451ac3d024fb88b9d668310920ac8 2024-11-18T22:42:08,715 DEBUG [M:0;3b0ca91ed64a:38467 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b4168cf71bdc48daa371a84a0713271e is 52, key is load_balancer_on/state:d/1731969727791/Put/seqid=0 2024-11-18T22:42:08,716 INFO [regionserver/3b0ca91ed64a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:42:08,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741840_1016 (size=5056) 2024-11-18T22:42:08,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741840_1016 (size=5056) 2024-11-18T22:42:08,736 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), 
to=hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b4168cf71bdc48daa371a84a0713271e 2024-11-18T22:42:08,757 DEBUG [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/208d87ddca544716ade9fa0451941779 as hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/208d87ddca544716ade9fa0451941779 2024-11-18T22:42:08,794 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/208d87ddca544716ade9fa0451941779, entries=8, sequenceid=29, filesize=5.5 K 2024-11-18T22:42:08,805 DEBUG [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d6a2131c8361490c8fbe129da6785406 as hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d6a2131c8361490c8fbe129da6785406 2024-11-18T22:42:08,837 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d6a2131c8361490c8fbe129da6785406, entries=3, sequenceid=29, filesize=5.2 K 2024-11-18T22:42:08,839 DEBUG [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4be451ac3d024fb88b9d668310920ac8 as hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4be451ac3d024fb88b9d668310920ac8 2024-11-18T22:42:08,883 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4be451ac3d024fb88b9d668310920ac8, entries=1, sequenceid=29, filesize=5.0 K 2024-11-18T22:42:08,889 DEBUG [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b4168cf71bdc48daa371a84a0713271e as hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b4168cf71bdc48daa371a84a0713271e 2024-11-18T22:42:08,914 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43985/user/jenkins/test-data/c89deebb-5e3f-6819-abaf-ae00d1798ddb/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b4168cf71bdc48daa371a84a0713271e, entries=1, sequenceid=29, filesize=4.9 K 2024-11-18T22:42:08,921 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 
KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 423ms, sequenceid=29, compaction requested=false 2024-11-18T22:42:08,958 INFO [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:42:08,964 DEBUG [M:0;3b0ca91ed64a:38467 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969728498Disabling compacts and flushes for region at 1731969728498Disabling writes for close at 1731969728498Obtaining lock to block concurrent updates at 1731969728498Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731969728498Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731969728499 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731969728508 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731969728509 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731969728533 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731969728534 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731969728574 (+40 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731969728600 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731969728600Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731969728650 (+50 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731969728675 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731969728675Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731969728695 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731969728714 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731969728714Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@261d42fd: reopening flushed file at 1731969728751 (+37 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6fb470c5: reopening flushed file at 1731969728795 (+44 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a0e7982: reopening flushed file at 1731969728837 (+42 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6df7d1ae: reopening flushed file at 1731969728883 (+46 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 423ms, sequenceid=29, compaction requested=false at 1731969728922 (+39 ms)Writing region close event to WAL at 1731969728958 (+36 ms)Closed at 1731969728958 2024-11-18T22:42:08,968 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,968 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,972 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,980 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,981 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:08,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38265 is added to blk_1073741830_1006 (size=10311) 2024-11-18T22:42:08,994 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37783 is added to blk_1073741830_1006 (size=10311) 2024-11-18T22:42:09,007 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T22:42:09,007 INFO [M:0;3b0ca91ed64a:38467 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T22:42:09,007 INFO [M:0;3b0ca91ed64a:38467 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38467 2024-11-18T22:42:09,008 INFO [M:0;3b0ca91ed64a:38467 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:42:09,122 INFO [M:0;3b0ca91ed64a:38467 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:42:09,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:42:09,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38467-0x101501caac50000, quorum=127.0.0.1:58703, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:42:09,143 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4902a927{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:09,144 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b1e120d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:42:09,144 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:42:09,144 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72c1d7ea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:42:09,144 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2819a81f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/hadoop.log.dir/,STOPPED} 2024-11-18T22:42:09,149 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:42:09,149 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:42:09,152 WARN [BP-342864332-172.17.0.2-1731969720473 heartbeating to localhost/127.0.0.1:43985 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:42:09,152 WARN [BP-342864332-172.17.0.2-1731969720473 heartbeating to localhost/127.0.0.1:43985 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-342864332-172.17.0.2-1731969720473 (Datanode Uuid d67118bd-d782-4faf-8ed1-dbf02fd0787d) service to localhost/127.0.0.1:43985 2024-11-18T22:42:09,153 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee/data/data3/current/BP-342864332-172.17.0.2-1731969720473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:09,153 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee/data/data4/current/BP-342864332-172.17.0.2-1731969720473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:09,154 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:42:09,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7cf0399{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:09,202 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a560f35{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:42:09,202 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:42:09,203 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55633b2d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:42:09,203 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25652898{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/hadoop.log.dir/,STOPPED} 2024-11-18T22:42:09,231 WARN [BP-342864332-172.17.0.2-1731969720473 heartbeating to localhost/127.0.0.1:43985 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:42:09,231 WARN [BP-342864332-172.17.0.2-1731969720473 heartbeating to localhost/127.0.0.1:43985 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-342864332-172.17.0.2-1731969720473 (Datanode Uuid 0df769e5-50ab-4d76-a3fa-ab359020ce08) service to localhost/127.0.0.1:43985 2024-11-18T22:42:09,232 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee/data/data1/current/BP-342864332-172.17.0.2-1731969720473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:09,232 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T22:42:09,233 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:42:09,233 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:42:09,236 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/cluster_2fb986bd-7e53-d582-a190-4be5410664ee/data/data2/current/BP-342864332-172.17.0.2-1731969720473 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:09,265 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f317c08{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:42:09,277 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5262ced6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:42:09,277 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:42:09,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@db210e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:42:09,278 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46598b1f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/hadoop.log.dir/,STOPPED} 2024-11-18T22:42:09,299 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T22:42:09,339 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T22:42:09,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T22:42:09,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/hadoop.log.dir so I do NOT create it in target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d 2024-11-18T22:42:09,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/01dc95e3-a1ba-d727-6d5b-e4aa7e6cc154/hadoop.tmp.dir so I do NOT create it in target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d 2024-11-18T22:42:09,340 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d, deleteOnExit=true 2024-11-18T22:42:09,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T22:42:09,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/test.cache.data in system properties and HBase conf 2024-11-18T22:42:09,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T22:42:09,340 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir in system properties and HBase conf 2024-11-18T22:42:09,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T22:42:09,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T22:42:09,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T22:42:09,341 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T22:42:09,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:42:09,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:42:09,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T22:42:09,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:42:09,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T22:42:09,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T22:42:09,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:42:09,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:42:09,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T22:42:09,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/nfs.dump.dir in system properties and HBase conf 2024-11-18T22:42:09,342 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/java.io.tmpdir in system properties and HBase conf 2024-11-18T22:42:09,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:42:09,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T22:42:09,343 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T22:42:09,362 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:42:09,754 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:09,765 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:42:09,771 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:42:09,772 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:42:09,772 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:42:09,779 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:09,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@774a68bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:42:09,781 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41fecd82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:42:09,944 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@367ccadf{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/java.io.tmpdir/jetty-localhost-33853-hadoop-hdfs-3_4_1-tests_jar-_-any-18257158888810855919/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:42:09,954 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@52b2db2b{HTTP/1.1, (http/1.1)}{localhost:33853} 2024-11-18T22:42:09,954 INFO [Time-limited test {}] server.Server(415): Started @123754ms 2024-11-18T22:42:09,974 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:42:10,376 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:10,386 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:42:10,390 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:42:10,390 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:42:10,390 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:42:10,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@acc902f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:42:10,402 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f0a9f88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:42:10,567 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@503eedef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/java.io.tmpdir/jetty-localhost-33495-hadoop-hdfs-3_4_1-tests_jar-_-any-7597362595292587585/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:10,568 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@46acec94{HTTP/1.1, (http/1.1)}{localhost:33495} 2024-11-18T22:42:10,568 INFO [Time-limited test {}] server.Server(415): Started @124368ms 2024-11-18T22:42:10,570 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:42:10,651 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:10,661 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:42:10,664 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:42:10,665 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:42:10,665 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:42:10,665 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6609519d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:42:10,666 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e633f36{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:42:10,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@377a86a0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/java.io.tmpdir/jetty-localhost-35763-hadoop-hdfs-3_4_1-tests_jar-_-any-8521372685286678347/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:10,785 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@79bab151{HTTP/1.1, (http/1.1)}{localhost:35763} 2024-11-18T22:42:10,785 INFO [Time-limited test {}] server.Server(415): Started @124585ms 2024-11-18T22:42:10,786 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:42:12,303 WARN [Thread-671 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data1/current/BP-1358070786-172.17.0.2-1731969729378/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:12,304 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data2/current/BP-1358070786-172.17.0.2-1731969729378/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:12,376 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:42:12,413 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x510c7c3a25365dfb with lease ID 0xce46b989a050747f: Processing first storage report for DS-ef8753ea-5241-4e99-9943-c350af2b0b10 from datanode DatanodeRegistration(127.0.0.1:46627, datanodeUuid=f84f5c24-5701-467c-9225-34011d55d5db, infoPort=42573, infoSecurePort=0, ipcPort=39279, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378) 2024-11-18T22:42:12,413 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x510c7c3a25365dfb with lease ID 0xce46b989a050747f: from storage DS-ef8753ea-5241-4e99-9943-c350af2b0b10 node DatanodeRegistration(127.0.0.1:46627, datanodeUuid=f84f5c24-5701-467c-9225-34011d55d5db, infoPort=42573, infoSecurePort=0, ipcPort=39279, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T22:42:12,418 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x510c7c3a25365dfb with lease ID 0xce46b989a050747f: Processing first storage report for DS-bf2f6f34-d787-44d2-aecb-d84084b3ea2e from datanode DatanodeRegistration(127.0.0.1:46627, datanodeUuid=f84f5c24-5701-467c-9225-34011d55d5db, infoPort=42573, infoSecurePort=0, ipcPort=39279, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378) 2024-11-18T22:42:12,419 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x510c7c3a25365dfb with lease ID 0xce46b989a050747f: from storage DS-bf2f6f34-d787-44d2-aecb-d84084b3ea2e node DatanodeRegistration(127.0.0.1:46627, datanodeUuid=f84f5c24-5701-467c-9225-34011d55d5db, infoPort=42573, infoSecurePort=0, ipcPort=39279, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:12,572 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data3/current/BP-1358070786-172.17.0.2-1731969729378/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:12,576 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data4/current/BP-1358070786-172.17.0.2-1731969729378/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:12,688 WARN [Thread-658 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:42:12,710 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d79edd2b9ac1045 with lease ID 0xce46b989a0507480: Processing first storage report for DS-03f4388d-3160-4e19-9a7e-0049fe475e37 from datanode DatanodeRegistration(127.0.0.1:34209, datanodeUuid=b20a2362-d157-4d15-ba99-ab9b389d4072, infoPort=38303, infoSecurePort=0, ipcPort=45297, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378) 2024-11-18T22:42:12,710 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d79edd2b9ac1045 with lease ID 0xce46b989a0507480: from storage DS-03f4388d-3160-4e19-9a7e-0049fe475e37 node DatanodeRegistration(127.0.0.1:34209, datanodeUuid=b20a2362-d157-4d15-ba99-ab9b389d4072, infoPort=38303, infoSecurePort=0, ipcPort=45297, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:12,710 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d79edd2b9ac1045 with lease ID 0xce46b989a0507480: Processing first storage report for DS-3ed104d3-e58b-482e-b9c2-3743721007fc from datanode DatanodeRegistration(127.0.0.1:34209, datanodeUuid=b20a2362-d157-4d15-ba99-ab9b389d4072, infoPort=38303, infoSecurePort=0, ipcPort=45297, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378) 2024-11-18T22:42:12,710 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d79edd2b9ac1045 with lease ID 0xce46b989a0507480: from storage DS-3ed104d3-e58b-482e-b9c2-3743721007fc node DatanodeRegistration(127.0.0.1:34209, datanodeUuid=b20a2362-d157-4d15-ba99-ab9b389d4072, infoPort=38303, infoSecurePort=0, ipcPort=45297, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T22:42:12,739 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:42:12,771 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d 2024-11-18T22:42:12,776 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:12,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:12,779 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:12,969 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/zookeeper_0, clientPort=49655, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T22:42:12,975 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49655 2024-11-18T22:42:12,975 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:12,977 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:13,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46627 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:42:13,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34209 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:42:13,440 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153 with version=8 2024-11-18T22:42:13,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/hbase-staging 2024-11-18T22:42:13,444 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:42:13,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:13,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:13,444 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:42:13,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:13,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=1 2024-11-18T22:42:13,444 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T22:42:13,445 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:42:13,448 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36309 2024-11-18T22:42:13,451 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36309 connecting to ZooKeeper ensemble=127.0.0.1:49655 2024-11-18T22:42:13,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:363090x0, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:42:13,737 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36309-0x101501cccfb0000 connected 2024-11-18T22:42:14,111 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:14,114 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:14,120 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:42:14,121 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153, hbase.cluster.distributed=false 2024-11-18T22:42:14,137 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:42:14,140 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36309 2024-11-18T22:42:14,145 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36309 2024-11-18T22:42:14,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36309 2024-11-18T22:42:14,161 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36309 2024-11-18T22:42:14,163 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36309 2024-11-18T22:42:14,188 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:42:14,188 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:14,189 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, 
maxQueueLength=30, handlerCount=3 2024-11-18T22:42:14,189 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:42:14,189 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:14,189 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:42:14,189 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T22:42:14,189 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:42:14,194 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35719 2024-11-18T22:42:14,196 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35719 connecting to ZooKeeper ensemble=127.0.0.1:49655 2024-11-18T22:42:14,197 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:14,203 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:14,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:357190x0, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:42:14,226 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:357190x0, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:42:14,226 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T22:42:14,232 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T22:42:14,233 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35719-0x101501cccfb0001 connected 2024-11-18T22:42:14,240 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T22:42:14,260 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:42:14,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35719 2024-11-18T22:42:14,272 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35719 2024-11-18T22:42:14,281 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35719 2024-11-18T22:42:14,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35719 2024-11-18T22:42:14,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35719 2024-11-18T22:42:14,336 DEBUG [M:0;3b0ca91ed64a:36309 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3b0ca91ed64a:36309 2024-11-18T22:42:14,337 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3b0ca91ed64a,36309,1731969733443 2024-11-18T22:42:14,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:42:14,353 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3b0ca91ed64a,36309,1731969733443 2024-11-18T22:42:14,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:42:14,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:14,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T22:42:14,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:14,388 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T22:42:14,400 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3b0ca91ed64a,36309,1731969733443 from backup master directory 2024-11-18T22:42:14,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:42:14,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3b0ca91ed64a,36309,1731969733443 2024-11-18T22:42:14,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
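(Editor's note, not part of the captured log.) The entries above show the master binding its NettyRpcServer on port 36309, the region server on port 35719, and both processes registering watchers against the ZooKeeper ensemble at 127.0.0.1:49655. As a hedged illustration only, the sketch below shows how an external client could reach this test cluster through that same ensemble; the quorum host and client port are simply the values this run happened to print, and the classes and methods are the standard public HBase client API, not anything taken from this log.

```java
// Editorial sketch only -- not emitted by, or part of, the test log above.
// Connects an HBase client to the mini cluster via the ZooKeeper ensemble
// 127.0.0.1:49655 that this particular run printed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // ensemble host from the log
    conf.setInt("hbase.zookeeper.property.clientPort", 49655); // clientPort from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // List whatever tables the freshly started cluster knows about.
      for (TableName tn : admin.listTableNames()) {
        System.out.println(tn.getNameAsString());
      }
    }
  }
}
```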
2024-11-18T22:42:14,425 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T22:42:14,425 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3b0ca91ed64a,36309,1731969733443 2024-11-18T22:42:14,465 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/hbase.id] with ID: b6c3d5d4-b377-4fc1-b3e8-6407f43c2ccb 2024-11-18T22:42:14,465 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/.tmp/hbase.id 2024-11-18T22:42:14,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46627 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:42:14,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34209 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:42:14,905 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/.tmp/hbase.id]:[hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/hbase.id] 2024-11-18T22:42:14,962 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:14,963 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T22:42:14,974 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 11ms. 
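(Editor's note, not part of the captured log.) This stretch of the log, from the StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1, ...} entry near the top of the section through the active-master registration and cluster ID file creation above, is the output of HBaseTestingUtil bringing up a mini cluster. The sketch below is a rough, assumption-laden rendering of the test-side calls that typically produce such a sequence; it uses the HBaseTestingUtil and StartMiniClusterOption classes named in the log entries, but the exact builder methods may differ between branches.

```java
// Editorial sketch only -- not emitted by, or part of, the test log above.
// Approximate test-side calls behind the startup/shutdown sequence in this log,
// assuming the branch-3 HBaseTestingUtil API named in the log entries.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)         // values mirror the StartMiniClusterOption{...} entry in the log
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // starts DFS, MiniZK, the master and the region server
    try {
      // the test body would run against util.getConnection() here
    } finally {
      util.shutdownMiniCluster();    // produces the "Minicluster is down" entry
    }
  }
}
```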
2024-11-18T22:42:15,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:15,006 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:15,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46627 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:42:15,080 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:42:15,081 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T22:42:15,082 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:42:15,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34209 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:42:15,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34209 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:42:15,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46627 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:42:15,164 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store 2024-11-18T22:42:15,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34209 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:42:15,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46627 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:42:15,657 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:42:15,657 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:42:15,657 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:42:15,657 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:42:15,657 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:42:15,657 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:42:15,658 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
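(Editor's note, not part of the captured log.) The two entries above print the full 'master:store' table descriptor, with column families info, proc, rs and state and per-family settings such as VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY and BLOCKSIZE. For readers less used to the descriptor toString format, the sketch below shows roughly how an equivalent descriptor would be assembled with the public HBase client builder API; it is an illustration of the printed attributes, not the code HBase actually uses to create master:store.

```java
// Editorial sketch only -- not emitted by, or part of, the test log above.
// Builds a descriptor approximating the 'master:store' descriptor printed in the log
// (only the 'info' family's non-default settings are spelled out; proc/rs/state keep defaults).
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}
```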
2024-11-18T22:42:15,658 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969735657Disabling compacts and flushes for region at 1731969735657Disabling writes for close at 1731969735657Writing region close event to WAL at 1731969735657Closed at 1731969735657 2024-11-18T22:42:15,676 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/.initializing 2024-11-18T22:42:15,677 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443 2024-11-18T22:42:15,724 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C36309%2C1731969733443, suffix=, logDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443, archiveDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/oldWALs, maxLogs=10 2024-11-18T22:42:15,725 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 2024-11-18T22:42:15,788 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 2024-11-18T22:42:15,800 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42573:42573),(127.0.0.1/127.0.0.1:38303:38303)] 2024-11-18T22:42:15,812 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:42:15,812 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:42:15,812 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,812 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,830 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T22:42:15,831 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:15,834 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:15,834 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,854 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T22:42:15,854 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:15,860 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:42:15,861 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,873 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T22:42:15,873 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:15,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:42:15,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T22:42:15,891 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:15,891 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:42:15,892 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,896 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,901 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,916 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,917 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,918 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T22:42:15,921 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:42:15,930 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:42:15,931 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695578, jitterRate=-0.11552761495113373}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T22:42:15,933 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731969735813Initializing all the Stores at 1731969735816 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969735816Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969735822 (+6 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969735822Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969735822Cleaning up temporary data from old regions at 1731969735917 (+95 ms)Region opened successfully at 1731969735932 (+15 ms) 2024-11-18T22:42:15,937 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T22:42:15,963 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57418543, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:42:15,968 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T22:42:15,968 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T22:42:15,968 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T22:42:15,969 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T22:42:15,972 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 2 msec 2024-11-18T22:42:15,972 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T22:42:15,972 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T22:42:15,988 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T22:42:15,996 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T22:42:16,028 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T22:42:16,029 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T22:42:16,048 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T22:42:16,065 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T22:42:16,065 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T22:42:16,076 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T22:42:16,088 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T22:42:16,096 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T22:42:16,111 DEBUG 
[master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T22:42:16,135 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T22:42:16,144 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T22:42:16,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:42:16,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:16,156 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3b0ca91ed64a,36309,1731969733443, sessionid=0x101501cccfb0000, setting cluster-up flag (Was=false) 2024-11-18T22:42:16,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:42:16,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:16,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:16,322 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T22:42:16,325 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,36309,1731969733443 2024-11-18T22:42:16,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:16,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:16,428 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T22:42:16,456 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,36309,1731969733443 2024-11-18T22:42:16,476 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T22:42:16,488 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T22:42:16,488 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T22:42:16,489 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T22:42:16,489 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3b0ca91ed64a,36309,1731969733443 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T22:42:16,501 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:42:16,501 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:42:16,501 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:42:16,501 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:42:16,501 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3b0ca91ed64a:0, corePoolSize=10, maxPoolSize=10 2024-11-18T22:42:16,501 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,502 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:42:16,502 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,516 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1731969766516 2024-11-18T22:42:16,516 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T22:42:16,517 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T22:42:16,517 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:42:16,517 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T22:42:16,517 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T22:42:16,519 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T22:42:16,519 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T22:42:16,519 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T22:42:16,522 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,523 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T22:42:16,523 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T22:42:16,523 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T22:42:16,524 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T22:42:16,524 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T22:42:16,524 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969736524,5,FailOnTimeoutGroup] 2024-11-18T22:42:16,526 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969736524,5,FailOnTimeoutGroup] 2024-11-18T22:42:16,526 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,526 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T22:42:16,526 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
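Note: the HMaster line above states that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A minimal sketch of supplying that threshold programmatically, assuming only the standard HBaseConfiguration API; the value 3 is an arbitrary illustration, not a recommendation from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class StoreFileRefCountConfig {
    public static Configuration withRefCountRecoveryEnabled() {
        Configuration conf = HBaseConfiguration.create();
        // Any value > 0 enables reopening of regions with very high store file
        // reference counts; 3 is illustrative only.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        return conf;
    }
}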
2024-11-18T22:42:16,526 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,527 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:16,594 INFO [PEWorker-3 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T22:42:16,614 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(746): ClusterId : b6c3d5d4-b377-4fc1-b3e8-6407f43c2ccb 2024-11-18T22:42:16,614 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T22:42:16,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34209 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:42:16,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46627 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:42:16,696 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T22:42:16,696 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T22:42:16,729 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T22:42:16,730 DEBUG [RS:0;3b0ca91ed64a:35719 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36666631, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:42:16,750 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.ShutdownHook(81): Installed 
shutdown hook thread: Shutdownhook:RS:0;3b0ca91ed64a:35719 2024-11-18T22:42:16,750 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T22:42:16,750 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T22:42:16,750 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-18T22:42:16,758 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(2659): reportForDuty to master=3b0ca91ed64a,36309,1731969733443 with port=35719, startcode=1731969734188 2024-11-18T22:42:16,759 DEBUG [RS:0;3b0ca91ed64a:35719 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T22:42:16,764 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58485, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T22:42:16,768 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36309 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:16,769 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36309 {}] master.ServerManager(517): Registering regionserver=3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:16,785 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153 2024-11-18T22:42:16,785 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42521 2024-11-18T22:42:16,785 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T22:42:16,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:42:16,804 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3b0ca91ed64a,35719,1731969734188] 2024-11-18T22:42:16,808 DEBUG [RS:0;3b0ca91ed64a:35719 {}] zookeeper.ZKUtil(111): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:16,808 WARN [RS:0;3b0ca91ed64a:35719 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T22:42:16,808 INFO [RS:0;3b0ca91ed64a:35719 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:42:16,809 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:16,858 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T22:42:16,864 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T22:42:16,866 INFO [RS:0;3b0ca91ed64a:35719 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T22:42:16,866 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,883 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T22:42:16,892 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T22:42:16,892 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,893 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,893 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,893 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,893 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,893 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,893 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:42:16,893 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,893 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,893 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,893 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,894 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,894 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:16,894 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:42:16,894 DEBUG [RS:0;3b0ca91ed64a:35719 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:42:16,900 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,900 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,900 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,900 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,900 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,900 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,35719,1731969734188-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:42:16,925 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T22:42:16,926 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,35719,1731969734188-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,926 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:16,926 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.Replication(171): 3b0ca91ed64a,35719,1731969734188 started 2024-11-18T22:42:16,951 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T22:42:16,951 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1482): Serving as 3b0ca91ed64a,35719,1731969734188, RpcServer on 3b0ca91ed64a/172.17.0.2:35719, sessionid=0x101501cccfb0001 2024-11-18T22:42:16,952 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T22:42:16,952 DEBUG [RS:0;3b0ca91ed64a:35719 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:16,952 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,35719,1731969734188' 2024-11-18T22:42:16,952 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T22:42:16,967 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T22:42:16,970 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T22:42:16,970 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T22:42:16,970 DEBUG [RS:0;3b0ca91ed64a:35719 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:16,970 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,35719,1731969734188' 2024-11-18T22:42:16,970 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T22:42:16,974 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T22:42:16,976 DEBUG [RS:0;3b0ca91ed64a:35719 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T22:42:16,976 INFO [RS:0;3b0ca91ed64a:35719 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T22:42:16,977 INFO [RS:0;3b0ca91ed64a:35719 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
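Note: the two quota manager lines above show RPC and space quota support disabled for this run. A minimal sketch of how quota support could be switched on instead, assuming the hbase.quota.enabled property; whether enabling quotas suits a given test setup is an assumption here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class QuotaEnabledConfig {
    public static Configuration withQuotasEnabled() {
        Configuration conf = HBaseConfiguration.create();
        // With this flag true, the region server starts its RPC quota manager
        // and the space quota manager instead of logging "Quota support disabled".
        conf.setBoolean("hbase.quota.enabled", true);
        return conf;
    }
}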
2024-11-18T22:42:17,042 INFO [PEWorker-3 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T22:42:17,043 INFO [PEWorker-3 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153 2024-11-18T22:42:17,093 INFO [RS:0;3b0ca91ed64a:35719 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C35719%2C1731969734188, suffix=, logDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188, archiveDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/oldWALs, maxLogs=32 2024-11-18T22:42:17,097 INFO [RS:0;3b0ca91ed64a:35719 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 2024-11-18T22:42:17,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34209 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:42:17,113 DEBUG [PEWorker-3 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:42:17,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46627 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:42:17,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 
2024-11-18T22:42:17,137 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:42:17,137 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:17,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:17,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:42:17,153 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:42:17,153 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:17,156 INFO [RS:0;3b0ca91ed64a:35719 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 2024-11-18T22:42:17,160 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:17,160 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:42:17,171 DEBUG [RS:0;3b0ca91ed64a:35719 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38303:38303),(127.0.0.1/127.0.0.1:42573:42573)] 
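Note: the WAL lines above report an FSHLogProvider with blocksize=256 MB, rollsize=128 MB and maxLogs=32; the 128 MB roll size is consistent with a roll multiplier of 0.5 applied to the block size. A minimal sketch of the related settings, assuming the hbase.wal.provider, hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs keys; the concrete values are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class WalConfigSketch {
    public static Configuration withExplicitWalSettings() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "filesystem");                            // FSHLog-based provider, as in the log
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);   // 256 MB WAL block size
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);            // roll at 50% of block size (128 MB)
        conf.setInt("hbase.regionserver.maxlogs", 32);                           // cap on un-archived WAL files
        return conf;
    }
}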
2024-11-18T22:42:17,173 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:42:17,173 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:17,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:17,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:42:17,197 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:42:17,197 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:17,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:17,201 DEBUG [PEWorker-3 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:42:17,215 DEBUG [PEWorker-3 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740 2024-11-18T22:42:17,219 DEBUG [PEWorker-3 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740 2024-11-18T22:42:17,234 DEBUG [PEWorker-3 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:42:17,235 DEBUG [PEWorker-3 {}] regionserver.HRegion(1060): Cleaning up temporary data 
for 1588230740 2024-11-18T22:42:17,237 DEBUG [PEWorker-3 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T22:42:17,240 DEBUG [PEWorker-3 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:42:17,244 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:42:17,245 INFO [PEWorker-3 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=739049, jitterRate=-0.06025111675262451}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:42:17,246 DEBUG [PEWorker-3 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731969737113Initializing all the Stores at 1731969737121 (+8 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969737121Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969737123 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969737123Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969737123Cleaning up temporary data from old regions at 1731969737235 (+112 ms)Region opened successfully at 1731969737246 (+11 ms) 2024-11-18T22:42:17,246 DEBUG [PEWorker-3 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:42:17,246 INFO [PEWorker-3 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:42:17,247 DEBUG [PEWorker-3 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:42:17,247 DEBUG [PEWorker-3 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:42:17,247 DEBUG [PEWorker-3 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:42:17,262 INFO [PEWorker-3 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-18T22:42:17,265 DEBUG [PEWorker-3 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969737246Disabling compacts and flushes for region at 1731969737246Disabling writes for close at 1731969737247 (+1 ms)Writing region close event to WAL at 1731969737262 (+15 ms)Closed at 1731969737262 2024-11-18T22:42:17,273 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:42:17,274 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T22:42:17,274 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T22:42:17,283 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:42:17,292 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T22:42:17,443 DEBUG [3b0ca91ed64a:36309 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T22:42:17,444 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:17,446 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,35719,1731969734188, state=OPENING 2024-11-18T22:42:17,546 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T22:42:17,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:17,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:17,555 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:42:17,555 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:42:17,555 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:42:17,555 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,35719,1731969734188}] 2024-11-18T22:42:17,710 
DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T22:42:17,713 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43055, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T22:42:17,726 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T22:42:17,727 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:42:17,738 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C35719%2C1731969734188.meta, suffix=.meta, logDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188, archiveDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/oldWALs, maxLogs=32 2024-11-18T22:42:17,739 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta 2024-11-18T22:42:17,778 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta 2024-11-18T22:42:17,808 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42573:42573),(127.0.0.1/127.0.0.1:38303:38303)] 2024-11-18T22:42:17,824 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:42:17,825 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T22:42:17,825 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T22:42:17,825 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T22:42:17,825 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T22:42:17,825 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:42:17,826 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T22:42:17,826 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T22:42:17,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:42:17,846 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:42:17,846 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:17,846 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:17,847 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:42:17,848 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:42:17,848 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:17,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:17,849 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:42:17,850 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:42:17,851 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:17,851 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:42:17,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:42:17,853 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:42:17,854 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:17,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T22:42:17,856 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:42:17,857 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740 2024-11-18T22:42:17,859 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740 2024-11-18T22:42:17,861 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:42:17,861 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:42:17,862 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T22:42:17,866 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:42:17,868 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803879, jitterRate=0.022185996174812317}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:42:17,868 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T22:42:17,869 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731969737826Writing region info on filesystem at 1731969737826Initializing all the Stores at 1731969737837 (+11 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969737837Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969737838 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969737838Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969737838Cleaning up temporary data from old regions at 1731969737861 (+23 ms)Running coprocessor post-open hooks at 1731969737868 (+7 ms)Region opened successfully at 1731969737869 (+1 ms) 2024-11-18T22:42:17,876 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731969737709 2024-11-18T22:42:17,886 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T22:42:17,886 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T22:42:17,897 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:17,900 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,35719,1731969734188, state=OPEN 2024-11-18T22:42:17,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:42:17,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:42:17,971 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:42:17,971 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:42:17,972 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:17,989 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T22:42:17,989 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,35719,1731969734188 in 417 msec 2024-11-18T22:42:18,006 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T22:42:18,006 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 714 msec 2024-11-18T22:42:18,018 DEBUG [PEWorker-3 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:42:18,018 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T22:42:18,028 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:42:18,029 DEBUG [PEWorker-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,35719,1731969734188, seqNum=-1] 2024-11-18T22:42:18,029 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:42:18,033 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59203, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:42:18,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.5760 sec 2024-11-18T22:42:18,061 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731969738061, completionTime=-1 2024-11-18T22:42:18,061 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T22:42:18,061 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T22:42:18,064 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T22:42:18,064 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731969798064 2024-11-18T22:42:18,064 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731969858064 2024-11-18T22:42:18,064 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T22:42:18,065 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,36309,1731969733443-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,065 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,36309,1731969733443-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,065 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,36309,1731969733443-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,065 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3b0ca91ed64a:36309, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T22:42:18,065 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,065 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,071 DEBUG [master/3b0ca91ed64a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T22:42:18,076 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.651sec 2024-11-18T22:42:18,076 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T22:42:18,076 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T22:42:18,076 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T22:42:18,076 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T22:42:18,076 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T22:42:18,076 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,36309,1731969733443-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:42:18,077 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,36309,1731969733443-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T22:42:18,105 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7044e1ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:42:18,105 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3b0ca91ed64a,36309,-1 for getting cluster id 2024-11-18T22:42:18,105 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T22:42:18,108 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T22:42:18,108 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T22:42:18,108 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,36309,1731969733443-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
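The AbstractRpcClient/ClusterIdFetcher entries around this point show the test building a client connection: it asks the master registry for the cluster id and then resolves the hbase:meta location. A minimal, hedged sketch of the same step from application code follows; the ZooKeeper quorum and client port are copied from this log purely for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper ensemble as it appears in this log (127.0.0.1:49655); illustrative only.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "49655");
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Setting up the connection involves the registry/cluster-id handshake logged above.
      System.out.println("connected: " + connection);
    }
  }
}
```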
2024-11-18T22:42:18,118 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b6c3d5d4-b377-4fc1-b3e8-6407f43c2ccb' 2024-11-18T22:42:18,120 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T22:42:18,121 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b6c3d5d4-b377-4fc1-b3e8-6407f43c2ccb" 2024-11-18T22:42:18,121 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d8a8688, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:42:18,121 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3b0ca91ed64a,36309,-1] 2024-11-18T22:42:18,121 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T22:42:18,140 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:42:18,146 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58706, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T22:42:18,148 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d25550f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:42:18,148 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:42:18,154 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,35719,1731969734188, seqNum=-1] 2024-11-18T22:42:18,155 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:42:18,163 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49744, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:42:18,169 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3b0ca91ed64a,36309,1731969733443 2024-11-18T22:42:18,169 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:18,179 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T22:42:18,198 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:42:18,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:18,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:18,198 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:42:18,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:42:18,198 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:42:18,198 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T22:42:18,198 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:42:18,200 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39695 2024-11-18T22:42:18,203 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39695 connecting to ZooKeeper ensemble=127.0.0.1:49655 2024-11-18T22:42:18,204 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:18,206 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:42:18,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:396950x0, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:42:18,225 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39695-0x101501cccfb0002 connected 2024-11-18T22:42:18,226 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39695-0x101501cccfb0002, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-18T22:42:18,227 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-18T22:42:18,228 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T22:42:18,231 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T22:42:18,232 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:39695-0x101501cccfb0002, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T22:42:18,234 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39695-0x101501cccfb0002, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:42:18,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39695 2024-11-18T22:42:18,245 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39695 2024-11-18T22:42:18,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39695 2024-11-18T22:42:18,256 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39695 2024-11-18T22:42:18,260 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39695 2024-11-18T22:42:18,268 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer(746): ClusterId : b6c3d5d4-b377-4fc1-b3e8-6407f43c2ccb 2024-11-18T22:42:18,268 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T22:42:18,295 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T22:42:18,295 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T22:42:18,316 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T22:42:18,317 DEBUG [RS:1;3b0ca91ed64a:39695 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@153c0140, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:42:18,339 DEBUG [RS:1;3b0ca91ed64a:39695 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;3b0ca91ed64a:39695 2024-11-18T22:42:18,339 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T22:42:18,339 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T22:42:18,339 DEBUG [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer(832): About to register with Master. 
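The RpcExecutor/RWQueueRpcExecutor entries above show the second region server (RS:1) sizing its call queues: three default handlers, a read/write split for the priority queue, and maxQueueLength=30. As a hedged sketch (the key names below are the commonly documented knobs and are assumptions here, not read from this log), this sizing is typically driven by configuration along these lines:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcQueueSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Total RPC handler threads per region server (the test runs with very few).
    conf.setInt("hbase.regionserver.handler.count", 3);
    // Fraction of call-queue handlers dedicated to reads when a read/write queue split is used.
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
    System.out.println("handlers = " + conf.getInt("hbase.regionserver.handler.count", 30));
  }
}
```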
2024-11-18T22:42:18,347 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer(2659): reportForDuty to master=3b0ca91ed64a,36309,1731969733443 with port=39695, startcode=1731969738197 2024-11-18T22:42:18,347 DEBUG [RS:1;3b0ca91ed64a:39695 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T22:42:18,353 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57847, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T22:42:18,354 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36309 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3b0ca91ed64a,39695,1731969738197 2024-11-18T22:42:18,354 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36309 {}] master.ServerManager(517): Registering regionserver=3b0ca91ed64a,39695,1731969738197 2024-11-18T22:42:18,367 DEBUG [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153 2024-11-18T22:42:18,367 DEBUG [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42521 2024-11-18T22:42:18,367 DEBUG [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T22:42:18,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:42:18,398 DEBUG [RS:1;3b0ca91ed64a:39695 {}] zookeeper.ZKUtil(111): regionserver:39695-0x101501cccfb0002, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3b0ca91ed64a,39695,1731969738197 2024-11-18T22:42:18,398 WARN [RS:1;3b0ca91ed64a:39695 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T22:42:18,398 INFO [RS:1;3b0ca91ed64a:39695 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:42:18,398 DEBUG [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197 2024-11-18T22:42:18,404 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3b0ca91ed64a,39695,1731969738197] 2024-11-18T22:42:18,464 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T22:42:18,468 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T22:42:18,472 INFO [RS:1;3b0ca91ed64a:39695 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T22:42:18,472 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T22:42:18,474 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T22:42:18,476 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T22:42:18,476 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,477 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:18,477 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:18,477 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:18,477 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:18,477 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:18,477 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:42:18,477 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:18,478 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:18,478 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:18,478 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:18,478 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:18,478 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:42:18,481 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:42:18,482 DEBUG [RS:1;3b0ca91ed64a:39695 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:42:18,495 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T22:42:18,496 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,496 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,496 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,496 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,496 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,39695,1731969738197-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:42:18,533 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T22:42:18,533 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,39695,1731969738197-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,533 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,533 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.Replication(171): 3b0ca91ed64a,39695,1731969738197 started 2024-11-18T22:42:18,556 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:42:18,556 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer(1482): Serving as 3b0ca91ed64a,39695,1731969738197, RpcServer on 3b0ca91ed64a/172.17.0.2:39695, sessionid=0x101501cccfb0002 2024-11-18T22:42:18,556 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T22:42:18,556 DEBUG [RS:1;3b0ca91ed64a:39695 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3b0ca91ed64a,39695,1731969738197 2024-11-18T22:42:18,556 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;3b0ca91ed64a:39695,5,FailOnTimeoutGroup] 2024-11-18T22:42:18,556 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,39695,1731969738197' 2024-11-18T22:42:18,556 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T22:42:18,557 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-18T22:42:18,557 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T22:42:18,561 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 3b0ca91ed64a,36309,1731969733443 2024-11-18T22:42:18,566 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@ceedca5 2024-11-18T22:42:18,566 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T22:42:18,576 DEBUG [RS:1;3b0ca91ed64a:39695 {}] 
procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T22:42:18,581 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58716, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T22:42:18,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36309 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T22:42:18,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36309 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-18T22:42:18,582 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36309 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:42:18,592 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T22:42:18,592 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T22:42:18,592 DEBUG [RS:1;3b0ca91ed64a:39695 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3b0ca91ed64a,39695,1731969738197 2024-11-18T22:42:18,593 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,39695,1731969738197' 2024-11-18T22:42:18,593 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T22:42:18,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36309 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T22:42:18,607 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T22:42:18,613 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T22:42:18,613 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:18,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36309 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-18T22:42:18,616 DEBUG [RS:1;3b0ca91ed64a:39695 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T22:42:18,616 INFO [RS:1;3b0ca91ed64a:39695 {}] 
quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T22:42:18,616 INFO [RS:1;3b0ca91ed64a:39695 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T22:42:18,617 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T22:42:18,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36309 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T22:42:18,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34209 is added to blk_1073741835_1011 (size=393) 2024-11-18T22:42:18,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46627 is added to blk_1073741835_1011 (size=393) 2024-11-18T22:42:18,675 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 15e7abf84071cfc525f00a5a896716ea, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153 2024-11-18T22:42:18,722 INFO [RS:1;3b0ca91ed64a:39695 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C39695%2C1731969738197, suffix=, logDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197, archiveDir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/oldWALs, maxLogs=32 2024-11-18T22:42:18,723 INFO [RS:1;3b0ca91ed64a:39695 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 2024-11-18T22:42:18,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34209 is added to blk_1073741836_1012 (size=76) 2024-11-18T22:42:18,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46627 is added to blk_1073741836_1012 (size=76) 2024-11-18T22:42:18,736 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:42:18,736 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 15e7abf84071cfc525f00a5a896716ea, disabling compactions & flushes 2024-11-18T22:42:18,737 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:18,737 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:18,737 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. after waiting 0 ms 2024-11-18T22:42:18,737 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:18,737 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:18,737 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 15e7abf84071cfc525f00a5a896716ea: Waiting for close lock at 1731969738736Disabling compacts and flushes for region at 1731969738736Disabling writes for close at 1731969738737 (+1 ms)Writing region close event to WAL at 1731969738737Closed at 1731969738737 2024-11-18T22:42:18,739 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T22:42:18,739 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731969738739"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731969738739"}]},"ts":"1731969738739"} 2024-11-18T22:42:18,742 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
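The HMaster$4(2454) entry above records the client request that started CreateTableProcedure pid=4: create 'TestLogRolling-testLogRollOnDatanodeDeath' with a single 'info' family (VERSIONS=1, ROW bloom filter). Below is a hedged sketch of issuing an equivalent request through the Admin API; it is illustrative only and simply reuses the descriptor attributes visible in the log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Table name and family attributes mirror the descriptor logged above.
      TableName tableName = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
      TableDescriptor descriptor = TableDescriptorBuilder.newBuilder(tableName)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .build())
          .build();
      admin.createTable(descriptor); // triggers a CreateTableProcedure on the master, as in the log
    }
  }
}
```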
2024-11-18T22:42:18,744 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T22:42:18,744 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731969738744"}]},"ts":"1731969738744"} 2024-11-18T22:42:18,747 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-18T22:42:18,748 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=15e7abf84071cfc525f00a5a896716ea, ASSIGN}] 2024-11-18T22:42:18,750 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=15e7abf84071cfc525f00a5a896716ea, ASSIGN 2024-11-18T22:42:18,752 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=15e7abf84071cfc525f00a5a896716ea, ASSIGN; state=OFFLINE, location=3b0ca91ed64a,35719,1731969734188; forceNewPlan=false, retain=false 2024-11-18T22:42:18,760 INFO [RS:1;3b0ca91ed64a:39695 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 2024-11-18T22:42:18,767 DEBUG [RS:1;3b0ca91ed64a:39695 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38303:38303),(127.0.0.1/127.0.0.1:42573:42573)] 2024-11-18T22:42:18,904 INFO [3b0ca91ed64a:36309 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-18T22:42:18,904 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=15e7abf84071cfc525f00a5a896716ea, regionState=OPENING, regionLocation=3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:18,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=15e7abf84071cfc525f00a5a896716ea, ASSIGN because future has completed 2024-11-18T22:42:18,920 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 15e7abf84071cfc525f00a5a896716ea, server=3b0ca91ed64a,35719,1731969734188}] 2024-11-18T22:42:19,096 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 
2024-11-18T22:42:19,097 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 15e7abf84071cfc525f00a5a896716ea, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:42:19,098 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:19,098 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:42:19,098 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:19,098 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:19,116 INFO [StoreOpener-15e7abf84071cfc525f00a5a896716ea-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:19,133 INFO [StoreOpener-15e7abf84071cfc525f00a5a896716ea-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 15e7abf84071cfc525f00a5a896716ea columnFamilyName info 2024-11-18T22:42:19,133 DEBUG [StoreOpener-15e7abf84071cfc525f00a5a896716ea-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:42:19,140 INFO [StoreOpener-15e7abf84071cfc525f00a5a896716ea-1 {}] regionserver.HStore(327): Store=15e7abf84071cfc525f00a5a896716ea/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:42:19,144 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:19,151 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:19,156 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:19,161 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:19,162 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:19,166 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:19,179 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:42:19,180 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 15e7abf84071cfc525f00a5a896716ea; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841759, jitterRate=0.07035228610038757}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T22:42:19,181 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:19,182 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 15e7abf84071cfc525f00a5a896716ea: Running coprocessor pre-open hook at 1731969739098Writing region info on filesystem at 1731969739098Initializing all the Stores at 1731969739108 (+10 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969739109 (+1 ms)Cleaning up temporary data from old regions at 1731969739162 (+53 ms)Running coprocessor post-open hooks at 1731969739181 (+19 ms)Region opened successfully at 1731969739181 2024-11-18T22:42:19,188 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea., pid=6, masterSystemTime=1731969739084 2024-11-18T22:42:19,194 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:19,195 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:19,195 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=15e7abf84071cfc525f00a5a896716ea, regionState=OPEN, openSeqNum=2, regionLocation=3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:19,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 15e7abf84071cfc525f00a5a896716ea, server=3b0ca91ed64a,35719,1731969734188 because future has completed 2024-11-18T22:42:19,244 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T22:42:19,245 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 15e7abf84071cfc525f00a5a896716ea, server=3b0ca91ed64a,35719,1731969734188 in 300 msec 2024-11-18T22:42:19,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T22:42:19,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=15e7abf84071cfc525f00a5a896716ea, ASSIGN in 498 msec 2024-11-18T22:42:19,273 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T22:42:19,274 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731969739273"}]},"ts":"1731969739273"} 2024-11-18T22:42:19,286 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-18T22:42:19,295 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T22:42:19,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 713 msec 2024-11-18T22:42:22,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:22,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:22,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:23,378 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:42:23,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:23,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:23,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:42:23,436 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-18T22:42:27,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T22:42:27,235 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T22:42:27,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T22:42:27,236 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-18T22:42:27,237 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:42:27,237 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T22:42:28,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36309 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T22:42:28,664 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-18T22:42:28,664 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-18T22:42:28,670 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T22:42:28,670 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 
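The ClientMetaTableAccessor/HBaseTestingUtil entries above scan hbase:meta and find a single region for the newly created table. A hedged sketch of the equivalent client-side lookup via RegionLocator (illustrative only):

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLookupSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = connection.getRegionLocator(
             TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))) {
      // Same information as the meta scan above: every region of the table and where it is open.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      locations.forEach(l -> System.out.println(
          l.getRegion().getRegionNameAsString() + " on " + l.getServerName()));
    }
  }
}
```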
2024-11-18T22:42:28,741 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:28,749 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:42:28,755 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:42:28,756 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:42:28,756 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:42:28,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7278bc9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:42:28,762 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3820ef29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:42:28,910 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24e79ea6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/java.io.tmpdir/jetty-localhost-37715-hadoop-hdfs-3_4_1-tests_jar-_-any-9769132544368492022/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:28,911 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e0d6f2f{HTTP/1.1, (http/1.1)}{localhost:37715} 2024-11-18T22:42:28,911 INFO [Time-limited test {}] server.Server(415): Started @142711ms 2024-11-18T22:42:28,913 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:42:29,079 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:29,084 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:42:29,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:42:29,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:42:29,087 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:42:29,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d01e731{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:42:29,089 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57b21db5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:42:29,216 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@23861c9a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/java.io.tmpdir/jetty-localhost-37021-hadoop-hdfs-3_4_1-tests_jar-_-any-7260945834869558462/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:29,217 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70c175e{HTTP/1.1, (http/1.1)}{localhost:37021} 2024-11-18T22:42:29,217 INFO [Time-limited test {}] server.Server(415): Started @143017ms 2024-11-18T22:42:29,219 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:42:29,360 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:29,369 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:42:29,378 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:42:29,378 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:42:29,378 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:42:29,379 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c2aec0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:42:29,380 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23c971b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:42:29,528 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5144450d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/java.io.tmpdir/jetty-localhost-39037-hadoop-hdfs-3_4_1-tests_jar-_-any-13381347433826064747/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:29,528 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@300b140{HTTP/1.1, (http/1.1)}{localhost:39037} 2024-11-18T22:42:29,528 INFO [Time-limited test {}] server.Server(415): Started @143328ms 2024-11-18T22:42:29,530 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:42:30,708 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data5/current/BP-1358070786-172.17.0.2-1731969729378/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:30,712 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data6/current/BP-1358070786-172.17.0.2-1731969729378/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:30,804 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:42:30,812 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94fe528e34ba7770 with lease ID 0xce46b989a0507481: Processing first storage report for DS-82377799-5076-490f-925a-460019a7a47d from datanode DatanodeRegistration(127.0.0.1:33021, datanodeUuid=0a7e7872-bf00-4ed0-8f92-bba4a2dd250c, infoPort=43051, infoSecurePort=0, ipcPort=35865, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378) 2024-11-18T22:42:30,812 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94fe528e34ba7770 with lease ID 0xce46b989a0507481: from storage DS-82377799-5076-490f-925a-460019a7a47d node DatanodeRegistration(127.0.0.1:33021, datanodeUuid=0a7e7872-bf00-4ed0-8f92-bba4a2dd250c, infoPort=43051, infoSecurePort=0, ipcPort=35865, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:30,813 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x94fe528e34ba7770 with lease ID 0xce46b989a0507481: Processing first storage report for DS-af8a5a06-81b0-404d-8890-7ce2d943ff87 from datanode DatanodeRegistration(127.0.0.1:33021, datanodeUuid=0a7e7872-bf00-4ed0-8f92-bba4a2dd250c, infoPort=43051, infoSecurePort=0, ipcPort=35865, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378) 2024-11-18T22:42:30,813 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x94fe528e34ba7770 with lease ID 0xce46b989a0507481: from storage DS-af8a5a06-81b0-404d-8890-7ce2d943ff87 node DatanodeRegistration(127.0.0.1:33021, datanodeUuid=0a7e7872-bf00-4ed0-8f92-bba4a2dd250c, infoPort=43051, infoSecurePort=0, ipcPort=35865, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:31,026 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7/current/BP-1358070786-172.17.0.2-1731969729378/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:31,026 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8/current/BP-1358070786-172.17.0.2-1731969729378/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:31,101 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:42:31,110 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xac48ded1e5a7c55e with lease ID 0xce46b989a0507482: Processing first storage report for DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653 from datanode DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378) 2024-11-18T22:42:31,110 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac48ded1e5a7c55e with lease ID 0xce46b989a0507482: from storage DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653 node DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:31,110 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xac48ded1e5a7c55e with lease ID 0xce46b989a0507482: Processing first storage report for DS-3b81bff4-a687-494b-8837-650f29ab41e3 from datanode DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378) 2024-11-18T22:42:31,110 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac48ded1e5a7c55e with lease ID 0xce46b989a0507482: from storage DS-3b81bff4-a687-494b-8837-650f29ab41e3 node DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:31,245 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data9/current/BP-1358070786-172.17.0.2-1731969729378/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:31,249 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data10/current/BP-1358070786-172.17.0.2-1731969729378/current, will proceed with Du for space computation calculation, 2024-11-18T22:42:31,325 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:42:31,339 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x358675b2c1c50726 with lease ID 0xce46b989a0507483: Processing first storage report for DS-4ea37519-5a3f-43ab-8020-00d146722880 from datanode DatanodeRegistration(127.0.0.1:39211, datanodeUuid=c90058f7-97fa-489f-b2dd-99c0507349b6, infoPort=44715, infoSecurePort=0, ipcPort=34621, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378) 2024-11-18T22:42:31,339 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x358675b2c1c50726 with lease ID 0xce46b989a0507483: from storage DS-4ea37519-5a3f-43ab-8020-00d146722880 node DatanodeRegistration(127.0.0.1:39211, datanodeUuid=c90058f7-97fa-489f-b2dd-99c0507349b6, infoPort=44715, infoSecurePort=0, ipcPort=34621, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:31,339 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x358675b2c1c50726 with lease ID 0xce46b989a0507483: Processing first storage report for DS-d823269c-f69c-438a-b137-38f5b589d72f from datanode DatanodeRegistration(127.0.0.1:39211, datanodeUuid=c90058f7-97fa-489f-b2dd-99c0507349b6, infoPort=44715, infoSecurePort=0, ipcPort=34621, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378) 2024-11-18T22:42:31,339 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x358675b2c1c50726 with lease ID 0xce46b989a0507483: from storage DS-d823269c-f69c-438a-b137-38f5b589d72f node DatanodeRegistration(127.0.0.1:39211, datanodeUuid=c90058f7-97fa-489f-b2dd-99c0507349b6, infoPort=44715, infoSecurePort=0, ipcPort=34621, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:31,374 WARN [ResponseProcessor for block BP-1358070786-172.17.0.2-1731969729378:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358070786-172.17.0.2-1731969729378:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:31,374 WARN [ResponseProcessor for block BP-1358070786-172.17.0.2-1731969729378:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358070786-172.17.0.2-1731969729378:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1358070786-172.17.0.2-1731969729378:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:31,375 WARN [DataStreamer for file /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 block BP-1358070786-172.17.0.2-1731969729378:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:31,375 WARN [DataStreamer for file /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 block BP-1358070786-172.17.0.2-1731969729378:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:31,375 WARN [ResponseProcessor for block BP-1358070786-172.17.0.2-1731969729378:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358070786-172.17.0.2-1731969729378:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1358070786-172.17.0.2-1731969729378:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:31,375 WARN [PacketResponder: BP-1358070786-172.17.0.2-1731969729378:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34209] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:31,376 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:49852 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49852 dst: /127.0.0.1:46627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:31,377 WARN [DataStreamer for file /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta block BP-1358070786-172.17.0.2-1731969729378:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:31,374 WARN [ResponseProcessor for block BP-1358070786-172.17.0.2-1731969729378:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358070786-172.17.0.2-1731969729378:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:31,377 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:49876 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49876 dst: /127.0.0.1:46627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:31,377 WARN [DataStreamer for file /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 block BP-1358070786-172.17.0.2-1731969729378:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:31,376 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:47028 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47028 dst: /127.0.0.1:34209 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] 
at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:31,378 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670799047_22 at /127.0.0.1:47078 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47078 dst: /127.0.0.1:34209 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:31,378 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:47000 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47000 dst: /127.0.0.1:34209 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:31,379 WARN [PacketResponder: BP-1358070786-172.17.0.2-1731969729378:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34209] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] 
at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:31,379 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:49892 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49892 dst: /127.0.0.1:46627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:31,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@377a86a0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:31,382 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_670799047_22 at /127.0.0.1:49924 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49924 dst: /127.0.0.1:46627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:31,383 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@79bab151{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:42:31,383 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:42:31,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e633f36{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:42:31,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6609519d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,STOPPED} 2024-11-18T22:42:31,384 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:47042 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34209:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47042 dst: /127.0.0.1:34209 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:31,389 WARN [BP-1358070786-172.17.0.2-1731969729378 heartbeating to localhost/127.0.0.1:42521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:42:31,389 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T22:42:31,389 WARN [BP-1358070786-172.17.0.2-1731969729378 heartbeating to localhost/127.0.0.1:42521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358070786-172.17.0.2-1731969729378 (Datanode Uuid b20a2362-d157-4d15-ba99-ab9b389d4072) service to localhost/127.0.0.1:42521 2024-11-18T22:42:31,389 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:42:31,389 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data3/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:31,390 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data4/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:31,390 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:42:31,397 WARN [DataStreamer for file /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 block BP-1358070786-172.17.0.2-1731969729378:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:31,397 WARN [DataStreamer for file /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta block BP-1358070786-172.17.0.2-1731969729378:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:31,399 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:54054 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54054 dst: /127.0.0.1:46627 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:31,405 WARN [ResponseProcessor for block BP-1358070786-172.17.0.2-1731969729378:blk_1073741830_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358070786-172.17.0.2-1731969729378:blk_1073741830_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:31,411 WARN [BP-1358070786-172.17.0.2-1731969729378 heartbeating to localhost/127.0.0.1:42521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358070786-172.17.0.2-1731969729378 (Datanode Uuid f84f5c24-5701-467c-9225-34011d55d5db) service to localhost/127.0.0.1:42521 2024-11-18T22:42:31,413 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data2/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:31,413 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data1/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:31,418 WARN [DataStreamer for file /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 block BP-1358070786-172.17.0.2-1731969729378:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:31,422 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@503eedef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:31,424 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@46acec94{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:42:31,424 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:42:31,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f0a9f88{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:42:31,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@acc902f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,STOPPED} 2024-11-18T22:42:31,432 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:42:31,438 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea., hostname=3b0ca91ed64a,35719,1731969734188, seqNum=2] 2024-11-18T22:42:31,445 ERROR [FSHLog-0-hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153-prefix:3b0ca91ed64a,35719,1731969734188 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:31,445 WARN [FSHLog-0-hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153-prefix:3b0ca91ed64a,35719,1731969734188 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:31,446 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C35719%2C1731969734188:(num 1731969737096) roll requested 2024-11-18T22:42:31,446 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C35719%2C1731969734188.1731969751446 2024-11-18T22:42:31,479 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:31,480 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:31,480 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:31,480 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:31,480 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:31,481 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969751446 2024-11-18T22:42:31,481 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:31,481 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:31,482 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-18T22:42:31,482 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-18T22:42:31,482 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 2024-11-18T22:42:31,485 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43051:43051),(127.0.0.1/127.0.0.1:44715:44715)] 2024-11-18T22:42:31,485 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 is not closed yet, will try archiving it next time 2024-11-18T22:42:31,486 WARN [IPC Server handler 3 on default port 42521 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-18T22:42:31,490 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 after 5ms 2024-11-18T22:42:32,493 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:33,085 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:33,486 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:33,496 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969751446 2024-11-18T22:42:33,497 WARN [ResponseProcessor for block BP-1358070786-172.17.0.2-1731969729378:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358070786-172.17.0.2-1731969729378:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:33,497 WARN [DataStreamer for file /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969751446 block BP-1358070786-172.17.0.2-1731969729378:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:33,497 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42314 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33021:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42314 dst: /127.0.0.1:33021 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:33,498 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:55352 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:39211:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55352 dst: /127.0.0.1:39211 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:42:33,508 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24e79ea6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:33,509 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e0d6f2f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:42:33,509 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:42:33,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3820ef29{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:42:33,509 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7278bc9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,STOPPED} 2024-11-18T22:42:33,511 WARN [BP-1358070786-172.17.0.2-1731969729378 heartbeating to localhost/127.0.0.1:42521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:42:33,511 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T22:42:33,511 WARN [BP-1358070786-172.17.0.2-1731969729378 heartbeating to localhost/127.0.0.1:42521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358070786-172.17.0.2-1731969729378 (Datanode Uuid 0a7e7872-bf00-4ed0-8f92-bba4a2dd250c) service to localhost/127.0.0.1:42521 2024-11-18T22:42:33,511 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:42:33,512 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data5/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:33,512 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data6/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:33,512 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:42:34,493 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:35,085 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:35,486 WARN [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]] 2024-11-18T22:42:35,486 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:35,487 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C35719%2C1731969734188:(num 1731969751446) roll requested 2024-11-18T22:42:35,487 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C35719%2C1731969734188.1731969755487 2024-11-18T22:42:35,494 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 after 4011ms 2024-11-18T22:42:35,520 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:35,522 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:35,522 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:35,524 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:35,525 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:35,525 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969751446 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969755487 2024-11-18T22:42:35,530 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44715:44715),(127.0.0.1/127.0.0.1:38169:38169)] 2024-11-18T22:42:35,530 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 is not closed yet, will try archiving it next time 2024-11-18T22:42:35,530 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969751446 is not closed yet, will try archiving it next time 2024-11-18T22:42:35,532 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T22:42:35,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39211 is added to blk_1073741838_1020 (size=2431) 2024-11-18T22:42:35,536 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 is not closed yet, will try archiving it next time 2024-11-18T22:42:36,494 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:37,086 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:37,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741838_1020 (size=2431) 2024-11-18T22:42:37,530 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:37,544 WARN [ResponseProcessor for block BP-1358070786-172.17.0.2-1731969729378:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1358070786-172.17.0.2-1731969729378:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:37,545 WARN [DataStreamer for file /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969755487 block BP-1358070786-172.17.0.2-1731969729378:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 
2024-11-18T22:42:37,545 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:44366 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39211:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44366 dst: /127.0.0.1:39211 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:37,546 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:41936 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41936 dst: /127.0.0.1:46537 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:37,548 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5144450d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:37,548 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@300b140{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:42:37,548 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:42:37,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23c971b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:42:37,549 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c2aec0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,STOPPED} 2024-11-18T22:42:37,551 WARN [BP-1358070786-172.17.0.2-1731969729378 heartbeating to localhost/127.0.0.1:42521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:42:37,551 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:42:37,551 WARN [BP-1358070786-172.17.0.2-1731969729378 heartbeating to localhost/127.0.0.1:42521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358070786-172.17.0.2-1731969729378 (Datanode Uuid c90058f7-97fa-489f-b2dd-99c0507349b6) service to localhost/127.0.0.1:42521 2024-11-18T22:42:37,551 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:42:37,551 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data9/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:37,552 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data10/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:42:37,552 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:42:37,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35719 {}] regionserver.HRegion(8855): Flush requested on 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:37,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 15e7abf84071cfc525f00a5a896716ea 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:42:37,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/5282a32c76864b9c8a6c93bf9146a242 is 1080, key is row0002/info:/1731969753514/Put/seqid=0 2024-11-18T22:42:37,589 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1023 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:37,590 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741840_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:37,590 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741840_1023 2024-11-18T22:42:37,593 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:37,596 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:37,596 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:37,596 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741841_1024 2024-11-18T22:42:37,597 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] 2024-11-18T22:42:37,600 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33021 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:37,600 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:41982 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741842_1025] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741842_1025 to mirror 127.0.0.1:33021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:37,600 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:37,600 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741842_1025 2024-11-18T22:42:37,600 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:41982 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741842_1025] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:37,600 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:41982 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741842_1025] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41982 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:37,601 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:37,605 WARN [Thread-924 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39211 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:37,605 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:41992 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741843_1026 to mirror 127.0.0.1:39211 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:42:37,605 WARN [Thread-924 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:37,605 WARN [Thread-924 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741843_1026 2024-11-18T22:42:37,605 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:41992 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:37,605 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:41992 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41992 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:42:37,607 WARN [Thread-924 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:37,608 WARN [IPC Server handler 4 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T22:42:37,608 WARN [IPC Server handler 4 on default port 42521 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T22:42:37,608 WARN [IPC Server handler 4 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T22:42:37,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741844_1027 (size=10347) 2024-11-18T22:42:37,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/5282a32c76864b9c8a6c93bf9146a242 2024-11-18T22:42:37,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/5282a32c76864b9c8a6c93bf9146a242 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/5282a32c76864b9c8a6c93bf9146a242 2024-11-18T22:42:37,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/5282a32c76864b9c8a6c93bf9146a242, entries=5, sequenceid=11, filesize=10.1 K 2024-11-18T22:42:37,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=7.35 KB/7531 for 15e7abf84071cfc525f00a5a896716ea in 95ms, sequenceid=11, compaction requested=false 2024-11-18T22:42:37,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 15e7abf84071cfc525f00a5a896716ea: 2024-11-18T22:42:37,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35719 {}] 
regionserver.HRegion(8855): Flush requested on 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:37,803 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 15e7abf84071cfc525f00a5a896716ea 1/1 column families, dataSize=8.40 KB heapSize=9.25 KB 2024-11-18T22:42:37,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/1cd917041d6b40b3a396bec021842bd3 is 1080, key is row0007/info:/1731969757564/Put/seqid=0 2024-11-18T22:42:37,813 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:37,813 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:37,813 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741845_1028 2024-11-18T22:42:37,814 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] 2024-11-18T22:42:37,815 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:37,815 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:37,816 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741846_1029 2024-11-18T22:42:37,819 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:37,828 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42012 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741847_1030 to mirror 127.0.0.1:33021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:37,828 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33021 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:37,829 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:37,829 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42012 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:37,829 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741847_1030 2024-11-18T22:42:37,829 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42012 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42012 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:37,830 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:37,839 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39211 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:37,839 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42020 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741848_1031 to mirror 127.0.0.1:39211 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:37,839 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:37,839 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741848_1031 2024-11-18T22:42:37,840 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42020 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:37,840 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42020 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42020 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:37,840 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:37,842 WARN [IPC Server handler 1 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T22:42:37,842 WARN [IPC Server handler 1 on default port 42521 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T22:42:37,842 WARN [IPC Server handler 1 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T22:42:37,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741849_1032 (size=12506) 2024-11-18T22:42:38,260 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.40 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/1cd917041d6b40b3a396bec021842bd3 2024-11-18T22:42:38,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/1cd917041d6b40b3a396bec021842bd3 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/1cd917041d6b40b3a396bec021842bd3 2024-11-18T22:42:38,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/1cd917041d6b40b3a396bec021842bd3, entries=7, sequenceid=22, filesize=12.2 K 2024-11-18T22:42:38,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.40 KB/8606, heapSize ~9.23 KB/9456, currentSize=2.10 KB/2150 for 15e7abf84071cfc525f00a5a896716ea in 504ms, sequenceid=22, compaction requested=false 2024-11-18T22:42:38,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 15e7abf84071cfc525f00a5a896716ea: 2024-11-18T22:42:38,307 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-18T22:42:38,307 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:38,307 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/1cd917041d6b40b3a396bec021842bd3 because midkey is the same as first or last row 2024-11-18T22:42:38,494 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:39,086 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
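The BlockPlacementPolicyDefault warnings earlier in this stretch ("Failed to place enough replicas ... please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology") name the loggers to raise when diagnosing why the NameNode could not find a second DISK replica. A small sketch, assuming the Log4j2 core API already on this test's classpath, of flipping those two loggers to DEBUG programmatically:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class EnablePlacementDebug {
        public static void main(String[] args) {
            // Logger names are taken verbatim from the warning text above.
            Configurator.setLevel(
                "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
            Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
        }
    }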
2024-11-18T22:42:39,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35719 {}] regionserver.HRegion(8855): Flush requested on 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:39,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 15e7abf84071cfc525f00a5a896716ea 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T22:42:39,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/fb8b5f89c7874efd8085c148dbe7ec4e is 1079, key is tmprow/info:/1731969759229/Put/seqid=0 2024-11-18T22:42:39,237 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:39,237 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:39,237 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741850_1033 2024-11-18T22:42:39,238 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:39,240 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
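The flush above is driven by ordinary client writes; the cell key "tmprow/info:/1731969759229/Put/seqid=0" is simply row "tmprow", column family "info", empty qualifier. A hedged sketch of the kind of client call that produces such a cell (connection setup and the cell value are illustrative, not taken from the test source; the table name is the one in the paths above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(
                     TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))) {
                // Row "tmprow", family "info", empty qualifier -> the cell shape seen in the flush above.
                Put put = new Put(Bytes.toBytes("tmprow"));
                put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value"));
                table.put(put);
            }
        }
    }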
2024-11-18T22:42:39,241 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:39,241 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741851_1034 2024-11-18T22:42:39,242 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:39,252 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39211 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:39,252 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:39,252 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741852_1035 2024-11-18T22:42:39,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42032 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741852_1035 to mirror 127.0.0.1:39211 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:39,253 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42032 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:39,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42032 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42032 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:39,254 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:39,256 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
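These DataStreamer/DataXceiver errors are raised on the background streamer thread; from the writing code's point of view they surface when the output stream is flushed or closed, not at write() time. A minimal, self-contained sketch (path and payload are made up for illustration) of the HDFS write path whose failure modes are being logged here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HdfsWriteSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // fs.defaultFS would point at hdfs://localhost:42521 in this test's case.
            FileSystem fs = FileSystem.get(conf);
            Path path = new Path("/tmp/example-file");   // illustrative path
            try (FSDataOutputStream out = fs.create(path, (short) 2 /* replication */)) {
                out.write(new byte[] {1, 2, 3});
                // Pipeline setup/recovery errors from the DataStreamer propagate to the
                // caller here, on hflush()/close().
                out.hflush();
            }
        }
    }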
2024-11-18T22:42:39,256 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:39,256 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741853_1036 2024-11-18T22:42:39,257 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] 2024-11-18T22:42:39,258 WARN [IPC Server handler 3 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T22:42:39,258 WARN [IPC Server handler 3 on default port 42521 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T22:42:39,258 WARN [IPC Server handler 3 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T22:42:39,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741854_1037 (size=6027) 2024-11-18T22:42:39,269 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/fb8b5f89c7874efd8085c148dbe7ec4e 2024-11-18T22:42:39,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/fb8b5f89c7874efd8085c148dbe7ec4e as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/fb8b5f89c7874efd8085c148dbe7ec4e 2024-11-18T22:42:39,293 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/fb8b5f89c7874efd8085c148dbe7ec4e, entries=1, sequenceid=32, filesize=5.9 K 2024-11-18T22:42:39,295 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 15e7abf84071cfc525f00a5a896716ea in 64ms, sequenceid=32, compaction requested=true 2024-11-18T22:42:39,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 15e7abf84071cfc525f00a5a896716ea: 2024-11-18T22:42:39,295 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-18T22:42:39,295 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:39,295 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/1cd917041d6b40b3a396bec021842bd3 because midkey is the same as first or last row 2024-11-18T22:42:39,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 15e7abf84071cfc525f00a5a896716ea:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:42:39,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:42:39,296 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:42:39,302 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:42:39,302 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HStore(1541): 15e7abf84071cfc525f00a5a896716ea/info is initiating minor compaction (all files) 2024-11-18T22:42:39,302 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 15e7abf84071cfc525f00a5a896716ea/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 
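The split-policy lines just above show two checks: IncreasingToUpperBoundRegionSplitPolicy compares total store size against a threshold that grows with the number of regions of the same table on this server (here regionsWithCommonTable=1 and sizeToCheck=16.0 K, since the test appears to configure very small flush/max-file sizes), and StoreUtils then refuses to split because the candidate midkey equals the first or last row. A rough sketch of that size threshold as I understand the policy's documented behaviour (the constants in main are illustrative):

    public class SplitThresholdSketch {
        // Approximation of IncreasingToUpperBoundRegionSplitPolicy's check:
        // threshold = min(desiredMaxFileSize, initialSize * regionCount^3),
        // falling back to desiredMaxFileSize for 0 or very many regions.
        static long sizeToCheck(int regionsWithCommonTable, long initialSize, long desiredMaxFileSize) {
            if (regionsWithCommonTable == 0 || regionsWithCommonTable > 100) {
                return desiredMaxFileSize;
            }
            long cubed = (long) regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
            return Math.min(desiredMaxFileSize, initialSize * cubed);
        }

        public static void main(String[] args) {
            // One region and a 16 KB initial size give a 16 KB threshold,
            // matching "sizeToCheck=16.0 K" in the log above.
            System.out.println(sizeToCheck(1, 16 * 1024, 1024L * 1024 * 1024));
        }
    }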
2024-11-18T22:42:39,303 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/5282a32c76864b9c8a6c93bf9146a242, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/1cd917041d6b40b3a396bec021842bd3, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/fb8b5f89c7874efd8085c148dbe7ec4e] into tmpdir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp, totalSize=28.2 K 2024-11-18T22:42:39,303 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5282a32c76864b9c8a6c93bf9146a242, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731969753514 2024-11-18T22:42:39,304 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1cd917041d6b40b3a396bec021842bd3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=22, earliestPutTs=1731969757564 2024-11-18T22:42:39,305 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb8b5f89c7874efd8085c148dbe7ec4e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1731969759229 2024-11-18T22:42:39,340 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 15e7abf84071cfc525f00a5a896716ea#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:42:39,341 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/f6630933e5d248a3b16e87c28515ada2 is 1080, key is row0002/info:/1731969753514/Put/seqid=0 2024-11-18T22:42:39,349 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
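The compaction lines above show ExploringCompactionPolicy selecting all three store files (10.1 K + 12.2 K + 5.9 K = 28.2 K) for a minor compaction, with PressureAwareThroughputController capping write throughput (50.00 MB/second limit here). The core "files in ratio" test that keeps the policy from compacting one huge file together with tiny ones can be sketched roughly as below; the 1.2 ratio is the usual default, not read from this test's configuration:

    import java.util.List;

    public class FilesInRatioSketch {
        // Rough sketch of the ratio check: a candidate set is acceptable only if
        // every file is no larger than (sum of the other files) * ratio.
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // The three files selected above, in bytes (summing to 28880 as logged),
            // pass the check with ratio 1.2, so all three are compacted together.
            System.out.println(filesInRatio(List.of(10347L, 12506L, 6027L), 1.2));
        }
    }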
2024-11-18T22:42:39,349 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK], DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:39,349 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741855_1038 2024-11-18T22:42:39,350 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:39,353 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:39,354 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:39,354 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741856_1039 2024-11-18T22:42:39,355 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:39,357 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:39,357 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:39,357 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741857_1040 2024-11-18T22:42:39,360 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:39,365 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34209 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:39,365 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42060 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741858_1041] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741858_1041 to mirror 127.0.0.1:34209 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:39,366 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:39,366 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741858_1041 2024-11-18T22:42:39,366 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42060 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741858_1041] {}] datanode.BlockReceiver(316): Block 1073741858 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:39,366 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42060 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741858_1041] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42060 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:42:39,367 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] 2024-11-18T22:42:39,368 WARN [IPC Server handler 3 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T22:42:39,368 WARN [IPC Server handler 3 on default port 42521 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T22:42:39,369 WARN [IPC Server handler 3 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T22:42:39,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741859_1042 (size=17994) 2024-11-18T22:42:39,532 WARN [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]] 2024-11-18T22:42:39,532 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:39,533 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C35719%2C1731969734188:(num 1731969755487) roll requested 2024-11-18T22:42:39,533 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C35719%2C1731969734188.1731969759533 2024-11-18T22:42:39,541 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:39,541 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:39,541 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741860_1043 2024-11-18T22:42:39,542 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] 2024-11-18T22:42:39,544 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:39,544 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK], DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:39,544 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741861_1044 2024-11-18T22:42:39,549 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:39,553 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:39,553 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:39,553 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741862_1045 2024-11-18T22:42:39,555 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:39,556 WARN [Thread-949 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:39,557 WARN [Thread-949 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 
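The FSHLog warning at 22:42:39,532 ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL") is the low-replication check that drives the WAL roll which follows. A rough behavioural sketch of that decision, with the threshold and retry cap written as plain parameters rather than as the actual HBase configuration keys (which are not verified here):

    public class LowReplicationRollSketch {
        private int consecutiveLowReplication = 0;

        // Sketch of the check performed after syncs: if the current WAL block's live
        // replica count has dropped below what the filesystem should provide, ask the
        // roller to close this WAL and open a new one on a healthier pipeline.
        boolean shouldRequestRoll(int currentReplicas, int expectedReplicas, int rollLimit) {
            if (currentReplicas < expectedReplicas) {
                consecutiveLowReplication++;
                // Stop requesting rolls after a few attempts so a cluster that is
                // genuinely short of datanodes does not roll forever.
                return consecutiveLowReplication <= rollLimit;
            }
            consecutiveLowReplication = 0;
            return false;
        }

        public static void main(String[] args) {
            LowReplicationRollSketch wal = new LowReplicationRollSketch();
            // Pipeline shrank to 1 replica while 2 were expected, as in the log above.
            System.out.println(wal.shouldRequestRoll(1, 2, 5));   // true -> roll requested
        }
    }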
2024-11-18T22:42:39,557 WARN [Thread-949 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741863_1046 2024-11-18T22:42:39,557 WARN [Thread-949 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:39,558 WARN [IPC Server handler 4 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T22:42:39,559 WARN [IPC Server handler 4 on default port 42521 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T22:42:39,559 WARN [IPC Server handler 4 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T22:42:39,572 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:39,580 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:39,580 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:39,584 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:39,584 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:39,585 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969755487 with entries=27, filesize=26.04 KB; new WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969759533 2024-11-18T22:42:39,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741839_1022 (size=26672) 2024-11-18T22:42:39,604 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38169:38169)] 2024-11-18T22:42:39,605 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 is not closed yet, will try archiving it next time 2024-11-18T22:42:39,605 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969755487 is not closed yet, 
will try archiving it next time 2024-11-18T22:42:39,608 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969751446 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/oldWALs/3b0ca91ed64a%2C35719%2C1731969734188.1731969751446 2024-11-18T22:42:39,845 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/f6630933e5d248a3b16e87c28515ada2 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/f6630933e5d248a3b16e87c28515ada2 2024-11-18T22:42:39,873 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 15e7abf84071cfc525f00a5a896716ea/info of 15e7abf84071cfc525f00a5a896716ea into f6630933e5d248a3b16e87c28515ada2(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:42:39,873 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 15e7abf84071cfc525f00a5a896716ea: 2024-11-18T22:42:39,874 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea., storeName=15e7abf84071cfc525f00a5a896716ea/info, priority=13, startTime=1731969759295; duration=0sec 2024-11-18T22:42:39,874 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T22:42:39,874 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:39,874 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/f6630933e5d248a3b16e87c28515ada2 because midkey is the same as first or last row 2024-11-18T22:42:39,874 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-18T22:42:39,874 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:39,874 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/f6630933e5d248a3b16e87c28515ada2 because midkey is the same as first or last row 2024-11-18T22:42:39,874 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, 
sizeToCheck=16.0 K 2024-11-18T22:42:39,874 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:39,874 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/f6630933e5d248a3b16e87c28515ada2 because midkey is the same as first or last row 2024-11-18T22:42:39,875 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:42:39,875 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 15e7abf84071cfc525f00a5a896716ea:info 2024-11-18T22:42:40,000 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 is not closed yet, will try archiving it next time 2024-11-18T22:42:40,495 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:40,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35719 {}] regionserver.HRegion(8855): Flush requested on 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:40,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 15e7abf84071cfc525f00a5a896716ea 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T22:42:40,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/e249ead501bc4e1989cf1540ee0b112a is 1079, key is tmprow/info:/1731969760691/Put/seqid=0 2024-11-18T22:42:40,707 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33021 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:40,707 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42094 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741865_1048 to mirror 127.0.0.1:33021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:40,707 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:40,707 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741865_1048 2024-11-18T22:42:40,707 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42094 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:40,707 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42094 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42094 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:40,709 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:40,717 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39211 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:40,717 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42096 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741866_1049 to mirror 127.0.0.1:39211 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:40,717 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:40,717 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741866_1049 2024-11-18T22:42:40,717 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42096 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:40,718 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42096 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42096 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:40,719 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:40,723 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:40,723 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:40,724 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741867_1050 2024-11-18T22:42:40,725 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:40,730 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:40,730 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 
2024-11-18T22:42:40,730 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741868_1051 2024-11-18T22:42:40,736 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] 2024-11-18T22:42:40,741 WARN [IPC Server handler 2 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T22:42:40,741 WARN [IPC Server handler 2 on default port 42521 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T22:42:40,741 WARN [IPC Server handler 2 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T22:42:40,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741869_1052 (size=6027) 2024-11-18T22:42:41,087 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:41,117 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@73671078[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378):Failed to transfer BP-1358070786-172.17.0.2-1731969729378:blk_1073741849_1032 to 127.0.0.1:39211 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:41,117 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7da0e1bc[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378):Failed to transfer BP-1358070786-172.17.0.2-1731969729378:blk_1073741844_1027 to 127.0.0.1:46627 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:42:41,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/e249ead501bc4e1989cf1540ee0b112a 2024-11-18T22:42:41,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/e249ead501bc4e1989cf1540ee0b112a as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/e249ead501bc4e1989cf1540ee0b112a 2024-11-18T22:42:41,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/e249ead501bc4e1989cf1540ee0b112a, entries=1, sequenceid=43, filesize=5.9 K 2024-11-18T22:42:41,223 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 15e7abf84071cfc525f00a5a896716ea in 528ms, sequenceid=43, compaction requested=false 2024-11-18T22:42:41,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 15e7abf84071cfc525f00a5a896716ea: 2024-11-18T22:42:41,224 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-18T22:42:41,224 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:41,224 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/f6630933e5d248a3b16e87c28515ada2 because midkey is the same as first or last row 2024-11-18T22:42:41,605 WARN [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]] 2024-11-18T22:42:41,605 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:41,606 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C35719%2C1731969734188:(num 1731969759533) roll requested 2024-11-18T22:42:41,606 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C35719%2C1731969734188.1731969761606 2024-11-18T22:42:41,621 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:41,622 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK], DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:41,622 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741870_1053 2024-11-18T22:42:41,624 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:41,626 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:41,626 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:41,626 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741871_1054 2024-11-18T22:42:41,628 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:41,631 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:41,631 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:41,631 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741872_1055 2024-11-18T22:42:41,632 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] 2024-11-18T22:42:41,650 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42106 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741873_1056] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741873_1056 to mirror 127.0.0.1:33021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:41,651 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42106 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741873_1056] {}] datanode.BlockReceiver(316): Block 1073741873 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T22:42:41,651 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33021 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:41,651 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:41,651 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:42106 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741873_1056] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42106 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:41,651 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741873_1056 2024-11-18T22:42:41,652 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:41,654 WARN [IPC Server handler 2 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T22:42:41,654 WARN [IPC Server handler 2 on default port 42521 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T22:42:41,654 WARN [IPC Server handler 2 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T22:42:41,704 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:41,704 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:41,704 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:41,704 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:41,705 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:41,705 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969759533 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969761606 2024-11-18T22:42:41,710 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38169:38169)] 2024-11-18T22:42:41,710 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 is not closed yet, will try archiving it next time 2024-11-18T22:42:41,710 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] 
wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969759533 is not closed yet, will try archiving it next time 2024-11-18T22:42:41,710 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969755487 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/oldWALs/3b0ca91ed64a%2C35719%2C1731969734188.1731969755487 2024-11-18T22:42:41,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741864_1047 (size=12911) 2024-11-18T22:42:42,111 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 is not closed yet, will try archiving it next time 2024-11-18T22:42:42,112 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@73671078[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378):Failed to transfer BP-1358070786-172.17.0.2-1731969729378:blk_1073741859_1042 to 127.0.0.1:33021 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:42,112 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7da0e1bc[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378):Failed to transfer BP-1358070786-172.17.0.2-1731969729378:blk_1073741854_1037 to 127.0.0.1:33021 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:42,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35719 {}] regionserver.HRegion(8855): Flush requested on 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:42,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 15e7abf84071cfc525f00a5a896716ea 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T22:42:42,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/4194a6b1dac7488c94ac1834cd0d9ed8 is 1079, key is tmprow/info:/1731969762134/Put/seqid=0 2024-11-18T22:42:42,151 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:42,151 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:42,151 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741875_1058 2024-11-18T22:42:42,152 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:42,154 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:42,154 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:42,154 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741876_1059 2024-11-18T22:42:42,155 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:42,157 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:42,157 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 
2024-11-18T22:42:42,157 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741877_1060 2024-11-18T22:42:42,164 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:42,193 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:60922 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741878_1061 to mirror 127.0.0.1:34209 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:42,194 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:60922 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:42,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:60922 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60922 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:42,195 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34209 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:42,195 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:42,195 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741878_1061 2024-11-18T22:42:42,200 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] 2024-11-18T22:42:42,201 WARN [IPC Server handler 4 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T22:42:42,201 WARN [IPC Server handler 4 on default port 42521 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T22:42:42,201 WARN [IPC Server handler 4 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T22:42:42,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46537 is added to blk_1073741879_1062 (size=6027) 2024-11-18T22:42:42,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/4194a6b1dac7488c94ac1834cd0d9ed8 2024-11-18T22:42:42,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/4194a6b1dac7488c94ac1834cd0d9ed8 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/4194a6b1dac7488c94ac1834cd0d9ed8 2024-11-18T22:42:42,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/4194a6b1dac7488c94ac1834cd0d9ed8, entries=1, sequenceid=53, filesize=5.9 K 2024-11-18T22:42:42,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 15e7abf84071cfc525f00a5a896716ea in 132ms, sequenceid=53, compaction requested=true 2024-11-18T22:42:42,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 15e7abf84071cfc525f00a5a896716ea: 2024-11-18T22:42:42,268 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-18T22:42:42,268 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:42,268 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/f6630933e5d248a3b16e87c28515ada2 because midkey is the same as first or last row 2024-11-18T22:42:42,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 15e7abf84071cfc525f00a5a896716ea:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:42:42,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:42:42,269 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:42:42,274 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:42:42,274 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HStore(1541): 15e7abf84071cfc525f00a5a896716ea/info is initiating minor compaction (all files) 2024-11-18T22:42:42,274 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] 
regionserver.HRegion(2416): Starting compaction of 15e7abf84071cfc525f00a5a896716ea/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:42,274 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/f6630933e5d248a3b16e87c28515ada2, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/e249ead501bc4e1989cf1540ee0b112a, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/4194a6b1dac7488c94ac1834cd0d9ed8] into tmpdir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp, totalSize=29.3 K 2024-11-18T22:42:42,276 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.Compactor(225): Compacting f6630933e5d248a3b16e87c28515ada2, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=32, earliestPutTs=1731969753514 2024-11-18T22:42:42,278 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.Compactor(225): Compacting e249ead501bc4e1989cf1540ee0b112a, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731969760691 2024-11-18T22:42:42,279 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4194a6b1dac7488c94ac1834cd0d9ed8, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1731969762134 2024-11-18T22:42:42,337 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 15e7abf84071cfc525f00a5a896716ea#info#compaction#24 average throughput is 1.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:42:42,338 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/0af1e21fa69f46d89456ea43e823a8f7 is 1080, key is row0002/info:/1731969753514/Put/seqid=0 2024-11-18T22:42:42,341 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:42,341 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:42,341 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741880_1063 2024-11-18T22:42:42,345 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] 2024-11-18T22:42:42,347 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:42,347 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:42,347 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741881_1064 2024-11-18T22:42:42,352 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:42,357 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:42,357 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:42,357 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741882_1065 2024-11-18T22:42:42,359 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:42,361 WARN [Thread-973 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:42,361 WARN [Thread-973 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 
2024-11-18T22:42:42,361 WARN [Thread-973 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741883_1066 2024-11-18T22:42:42,365 WARN [Thread-973 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:42,368 WARN [IPC Server handler 1 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T22:42:42,368 WARN [IPC Server handler 1 on default port 42521 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T22:42:42,368 WARN [IPC Server handler 1 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T22:42:42,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741884_1067 (size=18097) 2024-11-18T22:42:42,418 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/0af1e21fa69f46d89456ea43e823a8f7 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/0af1e21fa69f46d89456ea43e823a8f7 2024-11-18T22:42:42,453 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 15e7abf84071cfc525f00a5a896716ea/info of 15e7abf84071cfc525f00a5a896716ea into 0af1e21fa69f46d89456ea43e823a8f7(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T22:42:42,453 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 15e7abf84071cfc525f00a5a896716ea: 2024-11-18T22:42:42,453 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea., storeName=15e7abf84071cfc525f00a5a896716ea/info, priority=13, startTime=1731969762268; duration=0sec 2024-11-18T22:42:42,453 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-18T22:42:42,453 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:42,453 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/0af1e21fa69f46d89456ea43e823a8f7 because midkey is the same as first or last row 2024-11-18T22:42:42,453 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-18T22:42:42,454 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:42,454 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/0af1e21fa69f46d89456ea43e823a8f7 because midkey is the same as first or last row 2024-11-18T22:42:42,454 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-18T22:42:42,454 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:42,454 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/0af1e21fa69f46d89456ea43e823a8f7 because midkey is the same as first or last row 2024-11-18T22:42:42,454 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:42:42,454 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 15e7abf84071cfc525f00a5a896716ea:info 2024-11-18T22:42:42,495 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:42,770 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T22:42:43,088 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:43,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35719 {}] regionserver.HRegion(8855): Flush requested on 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:43,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 15e7abf84071cfc525f00a5a896716ea 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-18T22:42:43,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/08a03ae41e48431290ab26f84db79f71 is 1079, key is tmprow/info:/1731969763580/Put/seqid=0 2024-11-18T22:42:43,591 WARN [Thread-978 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:43,591 WARN [Thread-978 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:43,591 WARN [Thread-978 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741885_1068 2024-11-18T22:42:43,592 WARN [Thread-978 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:43,594 WARN [Thread-978 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:43,594 WARN [Thread-978 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK], DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:43,594 WARN [Thread-978 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741886_1069 2024-11-18T22:42:43,595 WARN [Thread-978 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:43,597 WARN [Thread-978 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:43,597 WARN [Thread-978 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:43,597 WARN [Thread-978 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741887_1070 2024-11-18T22:42:43,598 WARN [Thread-978 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:43,601 WARN [Thread-978 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34209 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:43,601 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:60958 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741888_1071 to mirror 127.0.0.1:34209 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:43,602 WARN [Thread-978 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]) is bad. 2024-11-18T22:42:43,602 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:60958 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:43,602 WARN [Thread-978 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741888_1071 2024-11-18T22:42:43,602 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:60958 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60958 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:42:43,603 WARN [Thread-978 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34209,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK] 2024-11-18T22:42:43,604 WARN [IPC Server handler 0 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-18T22:42:43,604 WARN [IPC Server handler 0 on default port 42521 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-18T22:42:43,604 WARN [IPC Server handler 0 on default port 42521 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-18T22:42:43,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741889_1072 (size=6027) 2024-11-18T22:42:43,710 WARN [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-18T22:42:43,711 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:43,828 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:42:43,840 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:42:43,870 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:42:43,871 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:42:43,871 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:42:43,885 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c713165{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:42:43,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4faf4ee8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:42:44,017 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/08a03ae41e48431290ab26f84db79f71 2024-11-18T22:42:44,043 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e6567e4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/java.io.tmpdir/jetty-localhost-38139-hadoop-hdfs-3_4_1-tests_jar-_-any-6398788089509662247/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:42:44,045 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@278e888c{HTTP/1.1, (http/1.1)}{localhost:38139} 2024-11-18T22:42:44,045 INFO [Time-limited test {}] server.Server(415): Started @157845ms 2024-11-18T22:42:44,048 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-18T22:42:44,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/08a03ae41e48431290ab26f84db79f71 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/08a03ae41e48431290ab26f84db79f71 2024-11-18T22:42:44,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/08a03ae41e48431290ab26f84db79f71, entries=1, sequenceid=64, filesize=5.9 K 2024-11-18T22:42:44,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 15e7abf84071cfc525f00a5a896716ea in 532ms, sequenceid=64, compaction requested=false 2024-11-18T22:42:44,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 15e7abf84071cfc525f00a5a896716ea: 2024-11-18T22:42:44,113 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.6 K, sizeToCheck=16.0 K 2024-11-18T22:42:44,113 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:44,113 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/0af1e21fa69f46d89456ea43e823a8f7 because midkey is the same as first or last row 2024-11-18T22:42:44,125 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@73671078[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378):Failed to transfer BP-1358070786-172.17.0.2-1731969729378:blk_1073741869_1052 to 127.0.0.1:39211 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:42:44,127 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7da0e1bc[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378):Failed to transfer BP-1358070786-172.17.0.2-1731969729378:blk_1073741839_1022 to 127.0.0.1:39211 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:44,496 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:44,874 WARN [Thread-997 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:42:44,886 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x512653160f9cac99 with lease ID 0xce46b989a0507484: from storage DS-03f4388d-3160-4e19-9a7e-0049fe475e37 node DatanodeRegistration(127.0.0.1:44647, datanodeUuid=b20a2362-d157-4d15-ba99-ab9b389d4072, infoPort=35517, infoSecurePort=0, ipcPort=43695, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:42:44,887 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x512653160f9cac99 with lease ID 0xce46b989a0507484: from storage DS-3ed104d3-e58b-482e-b9c2-3743721007fc node DatanodeRegistration(127.0.0.1:44647, datanodeUuid=b20a2362-d157-4d15-ba99-ab9b389d4072, infoPort=35517, infoSecurePort=0, ipcPort=43695, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378), blocks: 6, hasStaleStorage: false, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-18T22:42:45,089 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:45,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741864_1047 (size=12911) 2024-11-18T22:42:45,120 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@73671078[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378):Failed to transfer BP-1358070786-172.17.0.2-1731969729378:blk_1073741879_1062 to 127.0.0.1:46627 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:42:45,711 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:46,497 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:46,518 ERROR [FSHLog-0-hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData-prefix:3b0ca91ed64a,36309,1731969733443 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:46,518 WARN [FSHLog-0-hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData-prefix:3b0ca91ed64a,36309,1731969733443 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:46,519 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C36309%2C1731969733443:(num 1731969735725) roll requested 2024-11-18T22:42:46,520 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C36309%2C1731969733443.1731969766519 2024-11-18T22:42:46,541 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:46,541 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:46,541 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741890_1073 2024-11-18T22:42:46,543 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:46,550 WARN [Thread-1017 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1074 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33021 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:46,550 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:42614 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741891_1074] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data4]'}, localName='127.0.0.1:44647', datanodeUuid='b20a2362-d157-4d15-ba99-ab9b389d4072', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741891_1074 to mirror 127.0.0.1:33021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:46,550 WARN [Thread-1017 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741891_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44647,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:46,551 WARN [Thread-1017 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741891_1074 2024-11-18T22:42:46,551 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:42614 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741891_1074] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T22:42:46,551 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:42614 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741891_1074] {}] datanode.DataXceiver(331): 127.0.0.1:44647:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42614 dst: /127.0.0.1:44647 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:46,557 WARN [Thread-1017 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:46,591 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:46,593 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:46,597 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:46,600 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:46,608 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:46,611 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969766519 2024-11-18T22:42:46,619 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:46,619 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:46,619 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 2024-11-18T22:42:46,621 WARN [IPC Server handler 3 on default port 42521 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 has not been closed. Lease recovery is in progress. RecoveryId = 1076 for block blk_1073741830_1014 2024-11-18T22:42:46,627 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 after 8ms 2024-11-18T22:42:46,640 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35517:35517),(127.0.0.1/127.0.0.1:38169:38169)] 2024-11-18T22:42:46,640 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 is not closed yet, will try archiving it next time 2024-11-18T22:42:47,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741884_1067 (size=18097) 2024-11-18T22:42:47,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741889_1072 (size=6027) 2024-11-18T22:42:47,712 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:48,498 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:49,713 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:50,498 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:50,629 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 after 4010ms 2024-11-18T22:42:51,716 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:52,501 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:53,717 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:54,502 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:54,916 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@47e026fe {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1358070786-172.17.0.2-1731969729378:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:46627,null,null]) java.net.ConnectException: Call From 3b0ca91ed64a/172.17.0.2 to localhost:39279 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T22:42:54,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741833_1019 (size=455) 2024-11-18T22:42:55,561 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/oldWALs/3b0ca91ed64a%2C35719%2C1731969734188.1731969737096 2024-11-18T22:42:55,580 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969759533 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/oldWALs/3b0ca91ed64a%2C35719%2C1731969734188.1731969759533 2024-11-18T22:42:55,717 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:55,899 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@766d6621[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44647, datanodeUuid=b20a2362-d157-4d15-ba99-ab9b389d4072, infoPort=35517, infoSecurePort=0, ipcPort=43695, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378):Failed to transfer BP-1358070786-172.17.0.2-1731969729378:blk_1073741833_1019 to 127.0.0.1:39211 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:56,502 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:57,397 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C35719%2C1731969734188.1731969777397 2024-11-18T22:42:57,405 WARN [Thread-1029 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33021 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:57,406 WARN [Thread-1029 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:57,406 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:39122 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741893_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741893_1077 to mirror 127.0.0.1:33021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:57,406 WARN [Thread-1029 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741893_1077 2024-11-18T22:42:57,406 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:39122 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741893_1077] {}] datanode.BlockReceiver(316): Block 1073741893 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T22:42:57,406 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:39122 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741893_1077] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39122 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:57,407 WARN [Thread-1029 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:57,420 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:48752 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741894_1078] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data4]'}, localName='127.0.0.1:44647', datanodeUuid='b20a2362-d157-4d15-ba99-ab9b389d4072', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741894_1078 to mirror 127.0.0.1:39211 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:57,422 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:48752 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741894_1078] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T22:42:57,422 WARN [Thread-1029 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39211 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:57,422 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-648000223_22 at /127.0.0.1:48752 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741894_1078] {}] datanode.DataXceiver(331): 127.0.0.1:44647:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48752 dst: /127.0.0.1:44647 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:57,422 WARN [Thread-1029 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44647,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 
2024-11-18T22:42:57,422 WARN [Thread-1029 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741894_1078 2024-11-18T22:42:57,424 WARN [Thread-1029 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:57,434 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:57,435 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:57,435 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:57,435 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:57,435 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:57,435 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969761606 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969777397 2024-11-18T22:42:57,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741874_1057 (size=13591) 2024-11-18T22:42:57,438 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.1731969761606 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/oldWALs/3b0ca91ed64a%2C35719%2C1731969734188.1731969761606 2024-11-18T22:42:57,439 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35517:35517),(127.0.0.1/127.0.0.1:38169:38169)] 2024-11-18T22:42:57,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35719 {}] regionserver.HRegion(8855): Flush requested on 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:57,471 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 15e7abf84071cfc525f00a5a896716ea 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:42:57,486 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/af9801ddbce94180add9775488d97af1 is 1080, key is row0013/info:/1731969777440/Put/seqid=0 2024-11-18T22:42:57,488 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1080 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:57,488 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741896_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:44647,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:57,488 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741896_1080 2024-11-18T22:42:57,489 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:57,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741897_1081 (size=12509) 2024-11-18T22:42:57,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741897_1081 (size=12509) 2024-11-18T22:42:57,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/af9801ddbce94180add9775488d97af1 2024-11-18T22:42:57,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/af9801ddbce94180add9775488d97af1 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/af9801ddbce94180add9775488d97af1 2024-11-18T22:42:57,520 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/af9801ddbce94180add9775488d97af1, entries=7, sequenceid=74, filesize=12.2 K 2024-11-18T22:42:57,521 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=4.20 KB/4303 for 15e7abf84071cfc525f00a5a896716ea in 50ms, sequenceid=74, compaction requested=true 2024-11-18T22:42:57,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 15e7abf84071cfc525f00a5a896716ea: 2024-11-18T22:42:57,521 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=35.8 K, sizeToCheck=16.0 K 2024-11-18T22:42:57,521 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:42:57,521 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/0af1e21fa69f46d89456ea43e823a8f7 because midkey is the same as first or last row 2024-11-18T22:42:57,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 15e7abf84071cfc525f00a5a896716ea:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:42:57,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:42:57,522 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:42:57,523 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36633 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:42:57,523 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HStore(1541): 15e7abf84071cfc525f00a5a896716ea/info is initiating minor compaction (all files) 2024-11-18T22:42:57,523 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 15e7abf84071cfc525f00a5a896716ea/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:57,523 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/0af1e21fa69f46d89456ea43e823a8f7, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/08a03ae41e48431290ab26f84db79f71, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/af9801ddbce94180add9775488d97af1] into tmpdir=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp, totalSize=35.8 K 2024-11-18T22:42:57,524 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0af1e21fa69f46d89456ea43e823a8f7, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1731969753514 2024-11-18T22:42:57,524 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.Compactor(225): Compacting 08a03ae41e48431290ab26f84db79f71, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1731969763580 2024-11-18T22:42:57,525 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] compactions.Compactor(225): Compacting af9801ddbce94180add9775488d97af1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731969777440 2024-11-18T22:42:57,541 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 15e7abf84071cfc525f00a5a896716ea#info#compaction#27 average throughput is 19.50 
MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:42:57,541 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/7d0ddc460a3b4736a466e4db2ad20571 is 1080, key is row0002/info:/1731969753514/Put/seqid=0 2024-11-18T22:42:57,543 WARN [Thread-1046 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:57,544 WARN [Thread-1046 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741898_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:57,544 WARN [Thread-1046 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741898_1082 2024-11-18T22:42:57,545 WARN [Thread-1046 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:57,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741899_1083 (size=25758) 2024-11-18T22:42:57,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741899_1083 (size=25758) 2024-11-18T22:42:57,717 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-18T22:42:57,718 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:57,901 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T22:42:57,902 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T22:42:57,902 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:42:57,902 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-18T22:42:57,902 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:42:57,902 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T22:42:57,902 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=886043761, stopped=false 2024-11-18T22:42:57,903 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3b0ca91ed64a,36309,1731969733443 2024-11-18T22:42:57,903 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T22:42:57,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:42:57,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:57,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:42:57,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:57,921 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:42:57,921 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T22:42:57,921 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:42:57,922 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:42:57,922 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:39695-0x101501cccfb0002, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:42:57,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39695-0x101501cccfb0002, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:42:57,922 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3b0ca91ed64a,35719,1731969734188' ***** 2024-11-18T22:42:57,922 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T22:42:57,922 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3b0ca91ed64a,39695,1731969738197' ***** 2024-11-18T22:42:57,922 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T22:42:57,922 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T22:42:57,922 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T22:42:57,923 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:42:57,923 INFO [RS:0;3b0ca91ed64a:35719 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T22:42:57,923 INFO [RS:0;3b0ca91ed64a:35719 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T22:42:57,923 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T22:42:57,923 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(3091): Received CLOSE for 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:57,924 INFO [RS:1;3b0ca91ed64a:39695 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T22:42:57,924 INFO [RS:1;3b0ca91ed64a:39695 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T22:42:57,924 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer(959): stopping server 3b0ca91ed64a,39695,1731969738197 2024-11-18T22:42:57,924 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:42:57,924 INFO [RS:1;3b0ca91ed64a:39695 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;3b0ca91ed64a:39695. 
2024-11-18T22:42:57,924 DEBUG [RS:1;3b0ca91ed64a:39695 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:42:57,924 DEBUG [RS:1;3b0ca91ed64a:39695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:42:57,924 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer(976): stopping server 3b0ca91ed64a,39695,1731969738197; all regions closed. 2024-11-18T22:42:57,928 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T22:42:57,928 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:42:57,929 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(959): stopping server 3b0ca91ed64a,35719,1731969734188 2024-11-18T22:42:57,929 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:42:57,929 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39695-0x101501cccfb0002, quorum=127.0.0.1:49655, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:42:57,929 INFO [RS:0;3b0ca91ed64a:35719 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3b0ca91ed64a:35719. 
2024-11-18T22:42:57,929 DEBUG [RS:0;3b0ca91ed64a:35719 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:42:57,930 DEBUG [RS:0;3b0ca91ed64a:35719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:42:57,930 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T22:42:57,930 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T22:42:57,930 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 15e7abf84071cfc525f00a5a896716ea, disabling compactions & flushes 2024-11-18T22:42:57,930 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1993): waiting for 1 compactions to complete for region TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:57,931 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:57,931 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:57,931 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:57,931 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:57,932 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:57,936 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:57,937 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:57,937 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 2024-11-18T22:42:57,937 WARN [IPC Server handler 0 on default port 42521 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 has not been closed. Lease recovery is in progress. RecoveryId = 1084 for block blk_1073741837_1013 2024-11-18T22:42:57,938 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 after 1ms 2024-11-18T22:42:57,965 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/7d0ddc460a3b4736a466e4db2ad20571 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/7d0ddc460a3b4736a466e4db2ad20571 2024-11-18T22:42:57,978 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 15e7abf84071cfc525f00a5a896716ea/info of 15e7abf84071cfc525f00a5a896716ea into 7d0ddc460a3b4736a466e4db2ad20571(size=25.2 K), total size for store is 25.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:42:57,978 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 15e7abf84071cfc525f00a5a896716ea: 2024-11-18T22:42:57,978 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 
2024-11-18T22:42:57,978 INFO [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea., storeName=15e7abf84071cfc525f00a5a896716ea/info, priority=13, startTime=1731969777521; duration=0sec 2024-11-18T22:42:57,978 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:57,978 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. after waiting 0 ms 2024-11-18T22:42:57,978 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:42:57,978 DEBUG [RS:0;3b0ca91ed64a:35719-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 15e7abf84071cfc525f00a5a896716ea:info 2024-11-18T22:42:57,978 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:57,978 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T22:42:57,978 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 15e7abf84071cfc525f00a5a896716ea 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-18T22:42:57,979 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T22:42:57,980 INFO [regionserver/3b0ca91ed64a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T22:42:57,980 INFO [regionserver/3b0ca91ed64a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T22:42:57,984 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T22:42:57,984 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1325): Online Regions={15e7abf84071cfc525f00a5a896716ea=TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T22:42:57,984 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:57,984 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:42:57,984 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:42:57,985 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:42:57,985 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close 
lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:42:57,985 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:42:57,985 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-18T22:42:57,985 ERROR [FSHLog-0-hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153-prefix:3b0ca91ed64a,35719,1731969734188.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:57,986 WARN [FSHLog-0-hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153-prefix:3b0ca91ed64a,35719,1731969734188.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:57,986 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C35719%2C1731969734188.meta:.meta(num 1731969737739) roll requested 2024-11-18T22:42:57,986 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969777986.meta 2024-11-18T22:42:57,990 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/88b639df8d704d289c47a61cb846dbf2 is 1080, key is row0020/info:/1731969777473/Put/seqid=0 2024-11-18T22:42:58,010 WARN [Thread-1052 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46627 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:58,010 WARN [Thread-1052 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741900_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44647,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:58,010 WARN [Thread-1052 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741900_1085 2024-11-18T22:42:58,011 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:48800 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741900_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data4]'}, localName='127.0.0.1:44647', datanodeUuid='b20a2362-d157-4d15-ba99-ab9b389d4072', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741900_1085 to mirror 127.0.0.1:46627 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:58,011 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:48800 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741900_1085] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:58,011 WARN [Thread-1052 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:58,011 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:48800 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741900_1085] {}] datanode.DataXceiver(331): 127.0.0.1:44647:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48800 dst: /127.0.0.1:44647 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:58,013 WARN [Thread-1052 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:58,013 WARN [Thread-1052 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741902_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:58,014 WARN [Thread-1052 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741902_1087 2024-11-18T22:42:58,015 WARN [Thread-1052 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:58,017 WARN [Thread-1052 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1088 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:58,017 WARN [Thread-1052 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741903_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK], DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 2024-11-18T22:42:58,017 WARN [Thread-1052 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741903_1088 2024-11-18T22:42:58,018 WARN [Thread-1052 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:58,027 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:48816 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741901_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data4]'}, localName='127.0.0.1:44647', datanodeUuid='b20a2362-d157-4d15-ba99-ab9b389d4072', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741901_1086 to mirror 127.0.0.1:39211 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:58,027 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39211 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:58,027 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:48816 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741901_1086] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-18T22:42:58,027 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741901_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44647,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:58,027 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741901_1086 2024-11-18T22:42:58,027 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:48816 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741901_1086] {}] datanode.DataXceiver(331): 127.0.0.1:44647:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48816 dst: /127.0.0.1:44647 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:58,034 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:58,036 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1090 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:58,036 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741905_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK], DatanodeInfoWithStorage[127.0.0.1:44647,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 
2024-11-18T22:42:58,036 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741905_1090 2024-11-18T22:42:58,037 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:58,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741904_1089 (size=9267) 2024-11-18T22:42:58,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741904_1089 (size=9267) 2024-11-18T22:42:58,089 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:58,089 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:58,089 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:58,090 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:58,091 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:58,092 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969777986.meta 2024-11-18T22:42:58,092 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:58,093 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:42:58,093 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta 2024-11-18T22:42:58,096 WARN [IPC Server handler 4 on default port 42521 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta has not been closed. Lease recovery is in progress. RecoveryId = 1092 for block blk_1073741834_1010 2024-11-18T22:42:58,098 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta after 5ms 2024-11-18T22:42:58,104 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35517:35517),(127.0.0.1/127.0.0.1:38169:38169)] 2024-11-18T22:42:58,104 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta is not closed yet, will try archiving it next time 2024-11-18T22:42:58,162 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/.tmp/info/fce977b9e39347598def79ab983627e5 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea./info:regioninfo/1731969739195/Put/seqid=0 2024-11-18T22:42:58,175 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1093 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:58,175 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741907_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK], DatanodeInfoWithStorage[127.0.0.1:44647,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 
2024-11-18T22:42:58,175 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741907_1093 2024-11-18T22:42:58,177 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:58,185 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:58,193 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:39188 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741908_1094] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741908_1094 to mirror 127.0.0.1:39211 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:58,194 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741908_1094 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39211 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:58,194 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:39188 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741908_1094] {}] datanode.BlockReceiver(316): Block 1073741908 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-11-18T22:42:58,194 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741908_1094 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 2024-11-18T22:42:58,194 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741908_1094 2024-11-18T22:42:58,194 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:39188 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741908_1094] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39188 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:42:58,195 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:58,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741909_1095 (size=7089) 2024-11-18T22:42:58,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741909_1095 (size=7089) 2024-11-18T22:42:58,228 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/.tmp/info/fce977b9e39347598def79ab983627e5 2024-11-18T22:42:58,265 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/.tmp/ns/6e6684c1c01b43808b21aba999b08f3d is 43, key is default/ns:d/1731969738036/Put/seqid=0 2024-11-18T22:42:58,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741910_1096 (size=5153) 2024-11-18T22:42:58,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741910_1096 (size=5153) 2024-11-18T22:42:58,385 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 15e7abf84071cfc525f00a5a896716ea 2024-11-18T22:42:58,473 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=83 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/88b639df8d704d289c47a61cb846dbf2 2024-11-18T22:42:58,487 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/.tmp/info/88b639df8d704d289c47a61cb846dbf2 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/88b639df8d704d289c47a61cb846dbf2 2024-11-18T22:42:58,498 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/88b639df8d704d289c47a61cb846dbf2, entries=4, sequenceid=83, filesize=9.0 K 2024-11-18T22:42:58,499 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5378, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 15e7abf84071cfc525f00a5a896716ea in 521ms, sequenceid=83, compaction requested=false 2024-11-18T22:42:58,500 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/5282a32c76864b9c8a6c93bf9146a242, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/1cd917041d6b40b3a396bec021842bd3, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/f6630933e5d248a3b16e87c28515ada2, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/fb8b5f89c7874efd8085c148dbe7ec4e, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/e249ead501bc4e1989cf1540ee0b112a, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/0af1e21fa69f46d89456ea43e823a8f7, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/4194a6b1dac7488c94ac1834cd0d9ed8, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/08a03ae41e48431290ab26f84db79f71, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/af9801ddbce94180add9775488d97af1] to archive 2024-11-18T22:42:58,503 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-18T22:42:58,506 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/5282a32c76864b9c8a6c93bf9146a242 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/5282a32c76864b9c8a6c93bf9146a242 2024-11-18T22:42:58,512 INFO [regionserver/3b0ca91ed64a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:42:58,513 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/1cd917041d6b40b3a396bec021842bd3 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/1cd917041d6b40b3a396bec021842bd3 2024-11-18T22:42:58,517 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/f6630933e5d248a3b16e87c28515ada2 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/f6630933e5d248a3b16e87c28515ada2 2024-11-18T22:42:58,518 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/fb8b5f89c7874efd8085c148dbe7ec4e to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/fb8b5f89c7874efd8085c148dbe7ec4e 2024-11-18T22:42:58,524 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/e249ead501bc4e1989cf1540ee0b112a to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/e249ead501bc4e1989cf1540ee0b112a 2024-11-18T22:42:58,528 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/0af1e21fa69f46d89456ea43e823a8f7 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/0af1e21fa69f46d89456ea43e823a8f7 2024-11-18T22:42:58,533 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/4194a6b1dac7488c94ac1834cd0d9ed8 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/4194a6b1dac7488c94ac1834cd0d9ed8 2024-11-18T22:42:58,535 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/08a03ae41e48431290ab26f84db79f71 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/08a03ae41e48431290ab26f84db79f71 2024-11-18T22:42:58,538 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/af9801ddbce94180add9775488d97af1 to hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/info/af9801ddbce94180add9775488d97af1 2024-11-18T22:42:58,539 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3b0ca91ed64a:36309 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-18T22:42:58,539 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [5282a32c76864b9c8a6c93bf9146a242=10347, 1cd917041d6b40b3a396bec021842bd3=12506, f6630933e5d248a3b16e87c28515ada2=17994, fb8b5f89c7874efd8085c148dbe7ec4e=6027, e249ead501bc4e1989cf1540ee0b112a=6027, 0af1e21fa69f46d89456ea43e823a8f7=18097, 4194a6b1dac7488c94ac1834cd0d9ed8=6027, 08a03ae41e48431290ab26f84db79f71=6027, af9801ddbce94180add9775488d97af1=12509] 2024-11-18T22:42:58,548 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/default/TestLogRolling-testLogRollOnDatanodeDeath/15e7abf84071cfc525f00a5a896716ea/recovered.edits/86.seqid, newMaxSeqId=86, maxSeqId=1 2024-11-18T22:42:58,550 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 2024-11-18T22:42:58,550 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 15e7abf84071cfc525f00a5a896716ea: Waiting for close lock at 1731969777930Running coprocessor pre-close hooks at 1731969777930Disabling compacts and flushes for region at 1731969777930Disabling writes for close at 1731969777978 (+48 ms)Obtaining lock to block concurrent updates at 1731969777979 (+1 ms)Preparing flush snapshotting stores in 15e7abf84071cfc525f00a5a896716ea at 1731969777979Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea., syncing WAL and waiting on mvcc, flushsize=dataSize=5378, getHeapSize=6000, getOffHeapSize=0, getCellsCount=5 at 1731969777979Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. at 1731969777980 (+1 ms)Flushing 15e7abf84071cfc525f00a5a896716ea/info: creating writer at 1731969777980Flushing 15e7abf84071cfc525f00a5a896716ea/info: appending metadata at 1731969777989 (+9 ms)Flushing 15e7abf84071cfc525f00a5a896716ea/info: closing flushed file at 1731969777989Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b209f7: reopening flushed file at 1731969778486 (+497 ms)Finished flush of dataSize ~5.25 KB/5378, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 15e7abf84071cfc525f00a5a896716ea in 521ms, sequenceid=83, compaction requested=false at 1731969778499 (+13 ms)Writing region close event to WAL at 1731969778540 (+41 ms)Running coprocessor post-close hooks at 1731969778549 (+9 ms)Closed at 1731969778550 (+1 ms) 2024-11-18T22:42:58,550 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731969738581.15e7abf84071cfc525f00a5a896716ea. 
2024-11-18T22:42:58,572 INFO [regionserver/3b0ca91ed64a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T22:42:58,573 INFO [regionserver/3b0ca91ed64a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T22:42:58,586 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:42:58,617 INFO [master/3b0ca91ed64a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T22:42:58,617 INFO [master/3b0ca91ed64a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-18T22:42:58,716 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/.tmp/ns/6e6684c1c01b43808b21aba999b08f3d 2024-11-18T22:42:58,760 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/.tmp/table/8b259f57bda54f2dbf1158e09b6567bd is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731969739273/Put/seqid=0 2024-11-18T22:42:58,765 WARN [Thread-1080 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741911_1097 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:58,765 WARN [Thread-1080 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741911_1097 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK]) is bad. 
2024-11-18T22:42:58,765 WARN [Thread-1080 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741911_1097 2024-11-18T22:42:58,767 WARN [Thread-1080 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33021,DS-82377799-5076-490f-925a-460019a7a47d,DISK] 2024-11-18T22:42:58,770 WARN [Thread-1080 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741912_1098 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39211 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:58,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:48860 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741912_1098] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data4]'}, localName='127.0.0.1:44647', datanodeUuid='b20a2362-d157-4d15-ba99-ab9b389d4072', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741912_1098 to mirror 127.0.0.1:39211 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:58,770 WARN [Thread-1080 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741912_1098 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44647,DS-03f4388d-3160-4e19-9a7e-0049fe475e37,DISK], DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK]) is bad. 
2024-11-18T22:42:58,770 WARN [Thread-1080 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741912_1098 2024-11-18T22:42:58,770 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:48860 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741912_1098] {}] datanode.BlockReceiver(316): Block 1073741912 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:58,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:48860 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741912_1098] {}] datanode.DataXceiver(331): 127.0.0.1:44647:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48860 dst: /127.0.0.1:44647 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:58,771 WARN [Thread-1080 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39211,DS-4ea37519-5a3f-43ab-8020-00d146722880,DISK] 2024-11-18T22:42:58,785 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:39214 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741913_1099] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8]'}, localName='127.0.0.1:46537', datanodeUuid='901050c7-a3c6-4f78-bc37-197e9482d8c7', xmitsInProgress=0}:Exception transferring block BP-1358070786-172.17.0.2-1731969729378:blk_1073741913_1099 to mirror 127.0.0.1:46627 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:42:58,785 WARN [Thread-1080 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741913_1099 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46627 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:42:58,786 WARN [Thread-1080 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1358070786-172.17.0.2-1731969729378:blk_1073741913_1099 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46537,DS-4cbff23f-8b18-4c6a-aa40-4332f35fe653,DISK], DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK]) is bad. 2024-11-18T22:42:58,786 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:39214 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741913_1099] {}] datanode.BlockReceiver(316): Block 1073741913 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-18T22:42:58,786 WARN [Thread-1080 {}] hdfs.DataStreamer(1850): Abandoning BP-1358070786-172.17.0.2-1731969729378:blk_1073741913_1099 2024-11-18T22:42:58,786 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_724027061_22 at /127.0.0.1:39214 [Receiving block BP-1358070786-172.17.0.2-1731969729378:blk_1073741913_1099] {}] datanode.DataXceiver(331): 127.0.0.1:46537:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39214 dst: /127.0.0.1:46537 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:42:58,786 DEBUG [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:42:58,789 WARN [Thread-1080 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46627,DS-ef8753ea-5241-4e99-9943-c350af2b0b10,DISK] 2024-11-18T22:42:58,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741914_1100 (size=5424) 2024-11-18T22:42:58,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741914_1100 (size=5424) 2024-11-18T22:42:58,827 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/.tmp/table/8b259f57bda54f2dbf1158e09b6567bd 2024-11-18T22:42:58,855 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/.tmp/info/fce977b9e39347598def79ab983627e5 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/info/fce977b9e39347598def79ab983627e5 2024-11-18T22:42:58,866 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/info/fce977b9e39347598def79ab983627e5, entries=10, sequenceid=11, filesize=6.9 K 2024-11-18T22:42:58,868 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/.tmp/ns/6e6684c1c01b43808b21aba999b08f3d as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/ns/6e6684c1c01b43808b21aba999b08f3d 2024-11-18T22:42:58,880 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/ns/6e6684c1c01b43808b21aba999b08f3d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T22:42:58,882 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/.tmp/table/8b259f57bda54f2dbf1158e09b6567bd as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/table/8b259f57bda54f2dbf1158e09b6567bd 2024-11-18T22:42:58,893 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/table/8b259f57bda54f2dbf1158e09b6567bd, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T22:42:58,899 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished 
flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 913ms, sequenceid=11, compaction requested=false 2024-11-18T22:42:58,923 INFO [regionserver/3b0ca91ed64a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:42:58,928 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T22:42:58,932 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:42:58,932 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:42:58,933 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969777984Running coprocessor pre-close hooks at 1731969777984Disabling compacts and flushes for region at 1731969777984Disabling writes for close at 1731969777985 (+1 ms)Obtaining lock to block concurrent updates at 1731969777985Preparing flush snapshotting stores in 1588230740 at 1731969777985Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731969777986 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731969778109 (+123 ms)Flushing 1588230740/info: creating writer at 1731969778109Flushing 1588230740/info: appending metadata at 1731969778161 (+52 ms)Flushing 1588230740/info: closing flushed file at 1731969778161Flushing 1588230740/ns: creating writer at 1731969778239 (+78 ms)Flushing 1588230740/ns: appending metadata at 1731969778265 (+26 ms)Flushing 1588230740/ns: closing flushed file at 1731969778265Flushing 1588230740/table: creating writer at 1731969778727 (+462 ms)Flushing 1588230740/table: appending metadata at 1731969778759 (+32 ms)Flushing 1588230740/table: closing flushed file at 1731969778759Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16a3d6: reopening flushed file at 1731969778853 (+94 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f30ae12: reopening flushed file at 1731969778866 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a9099f5: reopening flushed file at 1731969778880 (+14 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 913ms, sequenceid=11, compaction requested=false at 1731969778899 (+19 ms)Writing region close event to WAL at 1731969778917 (+18 ms)Running coprocessor post-close hooks at 1731969778932 (+15 ms)Closed at 1731969778932 2024-11-18T22:42:58,933 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T22:42:58,987 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(976): stopping server 3b0ca91ed64a,35719,1731969734188; all regions closed. 
2024-11-18T22:42:58,996 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:58,996 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:58,997 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:58,997 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:58,998 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:42:59,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741906_1091 (size=825) 2024-11-18T22:42:59,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741906_1091 (size=825) 2024-11-18T22:42:59,125 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@73671078[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46537, datanodeUuid=901050c7-a3c6-4f78-bc37-197e9482d8c7, infoPort=38169, infoSecurePort=0, ipcPort=34581, storageInfo=lv=-57;cid=testClusterID;nsid=697129072;c=1731969729378):Failed to transfer BP-1358070786-172.17.0.2-1731969729378:blk_1073741874_1057 to 127.0.0.1:39211 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:43:01,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741835_1011 (size=393) 2024-11-18T22:43:01,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:43:01,939 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 after 4002ms 2024-11-18T22:43:02,099 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta after 4006ms 2024-11-18T22:43:02,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:43:02,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:43:02,937 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-18T22:43:02,941 DEBUG [RS:1;3b0ca91ed64a:39695 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/oldWALs 2024-11-18T22:43:02,942 INFO [RS:1;3b0ca91ed64a:39695 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C39695%2C1731969738197:(num 1731969738723) 2024-11-18T22:43:02,942 DEBUG [RS:1;3b0ca91ed64a:39695 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:43:02,942 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:43:02,942 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:43:02,942 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.ChoreService(370): Chore service for: regionserver/3b0ca91ed64a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T22:43:02,943 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T22:43:02,943 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T22:43:02,943 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T22:43:02,943 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:43:02,943 INFO [RS:1;3b0ca91ed64a:39695 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39695 2024-11-18T22:43:02,943 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T22:43:02,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:02,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:43:02,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39695-0x101501cccfb0002, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3b0ca91ed64a,39695,1731969738197 2024-11-18T22:43:02,956 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:43:02,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3b0ca91ed64a,39695,1731969738197] 2024-11-18T22:43:02,978 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3b0ca91ed64a,39695,1731969738197 already deleted, retry=false 2024-11-18T22:43:02,978 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3b0ca91ed64a,39695,1731969738197 expired; onlineServers=1 2024-11-18T22:43:03,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39695-0x101501cccfb0002, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:43:03,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39695-0x101501cccfb0002, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:43:03,068 INFO [RS:1;3b0ca91ed64a:39695 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:43:03,068 INFO [RS:1;3b0ca91ed64a:39695 {}] regionserver.HRegionServer(1031): Exiting; stopping=3b0ca91ed64a,39695,1731969738197; zookeeper connection closed. 2024-11-18T22:43:03,069 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@670e30b3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@670e30b3 2024-11-18T22:43:03,551 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:03,570 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:03,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:03,571 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:03,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:03,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:03,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:04,008 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-18T22:43:04,014 DEBUG [RS:0;3b0ca91ed64a:35719 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/oldWALs 2024-11-18T22:43:04,014 INFO [RS:0;3b0ca91ed64a:35719 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C35719%2C1731969734188.meta:.meta(num 1731969777986) 2024-11-18T22:43:04,016 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:04,016 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:04,017 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:04,017 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:04,017 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:04,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741895_1079 (size=16308) 2024-11-18T22:43:04,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741895_1079 (size=16308) 2024-11-18T22:43:04,084 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:43:04,101 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor192.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:04,105 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-11-18T22:43:04,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:04,107 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:04,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:04,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:04,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:04,118 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:04,425 DEBUG [RS:0;3b0ca91ed64a:35719 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/oldWALs 2024-11-18T22:43:04,425 INFO [RS:0;3b0ca91ed64a:35719 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C35719%2C1731969734188:(num 1731969777397) 2024-11-18T22:43:04,425 DEBUG [RS:0;3b0ca91ed64a:35719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:43:04,425 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:43:04,425 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:43:04,425 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.ChoreService(370): Chore service for: regionserver/3b0ca91ed64a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T22:43:04,426 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:43:04,426 INFO [RS:0;3b0ca91ed64a:35719 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35719 2024-11-18T22:43:04,426 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T22:43:04,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3b0ca91ed64a,35719,1731969734188 2024-11-18T22:43:04,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:43:04,448 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:43:04,468 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3b0ca91ed64a,35719,1731969734188] 2024-11-18T22:43:04,478 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3b0ca91ed64a,35719,1731969734188 already deleted, retry=false 2024-11-18T22:43:04,478 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3b0ca91ed64a,35719,1731969734188 expired; onlineServers=0 2024-11-18T22:43:04,478 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3b0ca91ed64a,36309,1731969733443' ***** 2024-11-18T22:43:04,478 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T22:43:04,478 INFO [M:0;3b0ca91ed64a:36309 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:43:04,478 INFO [M:0;3b0ca91ed64a:36309 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:43:04,478 DEBUG [M:0;3b0ca91ed64a:36309 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T22:43:04,479 DEBUG [M:0;3b0ca91ed64a:36309 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T22:43:04,479 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-18T22:43:04,479 INFO [M:0;3b0ca91ed64a:36309 {}] hbase.ChoreService(370): Chore service for: master/3b0ca91ed64a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T22:43:04,479 INFO [M:0;3b0ca91ed64a:36309 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:43:04,479 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969736524 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969736524,5,FailOnTimeoutGroup] 2024-11-18T22:43:04,479 DEBUG [M:0;3b0ca91ed64a:36309 {}] master.HMaster(1795): Stopping service threads 2024-11-18T22:43:04,479 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969736524 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969736524,5,FailOnTimeoutGroup] 2024-11-18T22:43:04,479 INFO [M:0;3b0ca91ed64a:36309 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T22:43:04,479 INFO [M:0;3b0ca91ed64a:36309 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:43:04,479 INFO [M:0;3b0ca91ed64a:36309 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T22:43:04,479 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-18T22:43:04,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T22:43:04,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:04,493 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-18T22:43:04,493 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-18T22:43:04,493 INFO [M:0;3b0ca91ed64a:36309 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/.lastflushedseqids 2024-11-18T22:43:04,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741915_1101 (size=130) 2024-11-18T22:43:04,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741915_1101 (size=130) 2024-11-18T22:43:04,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:43:04,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35719-0x101501cccfb0001, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:43:04,572 INFO [RS:0;3b0ca91ed64a:35719 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:43:04,572 INFO [RS:0;3b0ca91ed64a:35719 {}] regionserver.HRegionServer(1031): Exiting; stopping=3b0ca91ed64a,35719,1731969734188; zookeeper connection closed. 
2024-11-18T22:43:04,575 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1d8ad9d2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1d8ad9d2 2024-11-18T22:43:04,579 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-18T22:43:04,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:43:04,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741836_1012 (size=76) 2024-11-18T22:43:04,950 INFO [M:0;3b0ca91ed64a:36309 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T22:43:04,950 INFO [M:0;3b0ca91ed64a:36309 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T22:43:04,951 DEBUG [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:43:04,951 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:04,951 DEBUG [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:04,951 DEBUG [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:43:04,951 DEBUG [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:04,951 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-18T22:43:04,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:04,980 DEBUG [M:0;3b0ca91ed64a:36309 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0b26c5bd5fc14727bacc1f65959ee244 is 82, key is hbase:meta,,1/info:regioninfo/1731969737896/Put/seqid=0 2024-11-18T22:43:04,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741916_1102 (size=5672) 2024-11-18T22:43:04,992 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0b26c5bd5fc14727bacc1f65959ee244 2024-11-18T22:43:04,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741916_1102 (size=5672) 2024-11-18T22:43:05,030 DEBUG [M:0;3b0ca91ed64a:36309 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b048af6c75f4dfea26384ea1e9af057 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731969739301/Put/seqid=0 2024-11-18T22:43:05,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741917_1103 (size=6255) 2024-11-18T22:43:05,053 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b048af6c75f4dfea26384ea1e9af057 2024-11-18T22:43:05,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741917_1103 (size=6255) 2024-11-18T22:43:05,063 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2b048af6c75f4dfea26384ea1e9af057 2024-11-18T22:43:05,084 DEBUG [M:0;3b0ca91ed64a:36309 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/92542ee7297d4628b17ddce8422bdc84 is 69, key is 3b0ca91ed64a,35719,1731969734188/rs:state/1731969736769/Put/seqid=0 2024-11-18T22:43:05,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741918_1104 (size=5224) 2024-11-18T22:43:05,103 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:05,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741918_1104 (size=5224) 2024-11-18T22:43:05,500 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/92542ee7297d4628b17ddce8422bdc84 2024-11-18T22:43:05,531 DEBUG [M:0;3b0ca91ed64a:36309 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5d5e628dbd8c4a75b3738dbda5b59157 is 52, key is load_balancer_on/state:d/1731969738171/Put/seqid=0 2024-11-18T22:43:05,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741919_1105 (size=5056) 2024-11-18T22:43:05,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741919_1105 (size=5056) 2024-11-18T22:43:05,573 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5d5e628dbd8c4a75b3738dbda5b59157 2024-11-18T22:43:05,586 DEBUG [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0b26c5bd5fc14727bacc1f65959ee244 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0b26c5bd5fc14727bacc1f65959ee244 2024-11-18T22:43:05,608 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0b26c5bd5fc14727bacc1f65959ee244, entries=8, sequenceid=60, filesize=5.5 K 2024-11-18T22:43:05,613 DEBUG [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b048af6c75f4dfea26384ea1e9af057 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2b048af6c75f4dfea26384ea1e9af057 2024-11-18T22:43:05,622 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2b048af6c75f4dfea26384ea1e9af057 2024-11-18T22:43:05,622 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2b048af6c75f4dfea26384ea1e9af057, entries=6, sequenceid=60, filesize=6.1 K 2024-11-18T22:43:05,623 DEBUG [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/92542ee7297d4628b17ddce8422bdc84 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/92542ee7297d4628b17ddce8422bdc84 2024-11-18T22:43:05,633 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/92542ee7297d4628b17ddce8422bdc84, entries=2, sequenceid=60, filesize=5.1 K 2024-11-18T22:43:05,634 DEBUG [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5d5e628dbd8c4a75b3738dbda5b59157 as hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5d5e628dbd8c4a75b3738dbda5b59157 2024-11-18T22:43:05,662 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5d5e628dbd8c4a75b3738dbda5b59157, entries=1, sequenceid=60, filesize=4.9 K 2024-11-18T22:43:05,664 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 713ms, sequenceid=60, compaction requested=false 2024-11-18T22:43:05,713 INFO [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:05,714 DEBUG [M:0;3b0ca91ed64a:36309 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969784951Disabling compacts and flushes for region at 1731969784951Disabling writes for close at 1731969784951Obtaining lock to block concurrent updates at 1731969784951Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731969784951Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731969784952 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731969784956 (+4 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731969784956Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731969784979 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731969784979Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731969785007 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731969785029 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731969785029Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731969785063 (+34 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731969785083 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731969785083Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731969785507 (+424 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731969785530 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731969785530Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7b6c580b: reopening flushed file at 1731969785583 (+53 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b5f2ef3: reopening flushed file at 1731969785608 (+25 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b1fb88e: reopening flushed file at 1731969785622 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7eb14125: reopening flushed file at 1731969785633 (+11 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 713ms, sequenceid=60, compaction requested=false at 1731969785664 (+31 ms)Writing region close event to WAL at 1731969785713 (+49 ms)Closed at 1731969785713 2024-11-18T22:43:05,724 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:05,732 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:05,736 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:05,740 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:05,744 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:05,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741892_1075 (size=1045) 2024-11-18T22:43:05,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741892_1075 (size=1045) 2024-11-18T22:43:05,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:43:05,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:43:05,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:06,104 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:06,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:07,105 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:07,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-18T22:43:07,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:43:07,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T22:43:07,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T22:43:07,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:43:07,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:08,106 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:08,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44647 is added to blk_1073741838_1020 (size=2431) 2024-11-18T22:43:08,911 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@61dec4a2 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1358070786-172.17.0.2-1731969729378:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:46627,null,null]) java.net.ConnectException: Call From 3b0ca91ed64a/172.17.0.2 to localhost:39279 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-18T22:43:08,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:09,107 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:09,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:10,108 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T22:43:10,758 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds"
2024-11-18T22:43:10,758 INFO [M:0;3b0ca91ed64a:36309 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-18T22:43:10,759 INFO [M:0;3b0ca91ed64a:36309 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36309
2024-11-18T22:43:10,759 INFO [M:0;3b0ca91ed64a:36309 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-18T22:43:10,760 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-18T22:43:10,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46537 is added to blk_1073741837_1084 (size=85)
2024-11-18T22:43:10,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-18T22:43:10,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36309-0x101501cccfb0000, quorum=127.0.0.1:49655, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-18T22:43:10,922 INFO [M:0;3b0ca91ed64a:36309 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-18T22:43:10,941 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e6567e4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-18T22:43:10,941 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@278e888c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-18T22:43:10,942 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-18T22:43:10,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4faf4ee8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-18T22:43:10,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c713165{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,STOPPED}
2024-11-18T22:43:10,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:10,969 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@61dec4a2 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1358070786-172.17.0.2-1731969729378:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:46627,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:39279 , LocalHost:localPort 3b0ca91ed64a/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-18T22:43:10,970 WARN [BP-1358070786-172.17.0.2-1731969729378 heartbeating to localhost/127.0.0.1:42521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:43:10,970 WARN [BP-1358070786-172.17.0.2-1731969729378 heartbeating to localhost/127.0.0.1:42521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358070786-172.17.0.2-1731969729378 (Datanode Uuid b20a2362-d157-4d15-ba99-ab9b389d4072) service to localhost/127.0.0.1:42521 2024-11-18T22:43:10,971 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data3/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:10,971 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data4/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:10,971 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T22:43:10,971 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:43:10,972 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:43:10,981 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@23861c9a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:10,981 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70c175e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:43:10,982 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:43:10,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57b21db5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:43:10,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d01e731{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,STOPPED} 2024-11-18T22:43:11,000 WARN [BP-1358070786-172.17.0.2-1731969729378 heartbeating to localhost/127.0.0.1:42521 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:43:11,000 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:43:11,000 WARN [BP-1358070786-172.17.0.2-1731969729378 heartbeating to localhost/127.0.0.1:42521 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1358070786-172.17.0.2-1731969729378 (Datanode Uuid 901050c7-a3c6-4f78-bc37-197e9482d8c7) service to localhost/127.0.0.1:42521 2024-11-18T22:43:11,000 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:43:11,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data7/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:11,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/cluster_fd5f8588-ae57-7dbc-886b-5745ee36413d/data/data8/current/BP-1358070786-172.17.0.2-1731969729378 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:11,002 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:43:11,023 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@367ccadf{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:43:11,040 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@52b2db2b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:43:11,040 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:43:11,040 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41fecd82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:43:11,040 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@774a68bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir/,STOPPED} 2024-11-18T22:43:11,067 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T22:43:11,109 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-18T22:43:11,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-18T22:43:11,146 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=151 (was 78)
Potentially hanging thread: HMaster-EventLoopGroup-7-2
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42521
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-5-2
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-20-3
java.base@17.0.11/java.lang.Thread.sleep(Native Method)
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:42521
java.base@17.0.11/java.lang.Thread.sleep(Native Method)
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Close-WAL-Writer-0
java.base@17.0.11/java.lang.Thread.sleep(Native Method)
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175)
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$890/0x00007f54c4bf46e8.run(Unknown Source)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread:
MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:42521 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42521 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42521 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-17-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$890/0x00007f54c4bf46e8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42521 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42521 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42521 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42521 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:42521 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$890/0x00007f54c4bf46e8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42521 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=436 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1062 (was 1220), ProcessCount=11 (was 11), AvailableMemoryMB=6788 (was 7182) 2024-11-18T22:43:11,165 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=151, OpenFileDescriptor=436, MaxFileDescriptor=1048576, SystemLoadAverage=1062, ProcessCount=11, AvailableMemoryMB=6782 2024-11-18T22:43:11,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T22:43:11,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.log.dir so I do NOT create it in target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2 2024-11-18T22:43:11,165 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/18e77c69-ebbd-7024-faac-82d803fb3c6d/hadoop.tmp.dir so I do NOT create it in target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2 2024-11-18T22:43:11,166 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02, deleteOnExit=true 2024-11-18T22:43:11,166 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T22:43:11,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/test.cache.data in system properties and HBase conf 2024-11-18T22:43:11,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T22:43:11,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir in system properties and HBase conf 2024-11-18T22:43:11,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T22:43:11,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T22:43:11,173 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T22:43:11,173 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-18T22:43:11,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:43:11,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:43:11,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T22:43:11,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:43:11,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T22:43:11,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T22:43:11,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:43:11,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:43:11,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T22:43:11,175 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/nfs.dump.dir in system properties and HBase conf 2024-11-18T22:43:11,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/java.io.tmpdir in system properties and HBase conf 2024-11-18T22:43:11,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:43:11,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T22:43:11,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T22:43:11,196 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:43:11,676 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:11,756 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:11,772 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:43:11,781 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:43:11,781 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:43:11,781 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:43:11,782 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:11,792 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@265efa03{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:43:11,793 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58a42b34{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:43:11,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4658354a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/java.io.tmpdir/jetty-localhost-41549-hadoop-hdfs-3_4_1-tests_jar-_-any-5946556653423763583/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:43:11,919 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2bdc2a53{HTTP/1.1, (http/1.1)}{localhost:41549} 2024-11-18T22:43:11,920 INFO [Time-limited test {}] server.Server(415): Started @185719ms 2024-11-18T22:43:11,936 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 
2024-11-18T22:43:11,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:12,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:12,360 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:43:12,382 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:12,382 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:12,383 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:12,383 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:12,383 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:12,385 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:12,389 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:12,393 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:12,400 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:12,408 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:43:12,418 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:43:12,418 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:43:12,418 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:43:12,419 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77432f4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:43:12,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52bf6dde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:43:12,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@347323d1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/java.io.tmpdir/jetty-localhost-38463-hadoop-hdfs-3_4_1-tests_jar-_-any-10561050744884031470/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:12,550 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@438ec950{HTTP/1.1, (http/1.1)}{localhost:38463} 2024-11-18T22:43:12,550 INFO [Time-limited test {}] server.Server(415): Started @186350ms 2024-11-18T22:43:12,552 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:43:12,672 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:12,677 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:12,680 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:43:12,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:43:12,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:43:12,688 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:43:12,693 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@669883e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:43:12,694 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ffbb97b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:43:12,823 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@107ec413{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/java.io.tmpdir/jetty-localhost-42069-hadoop-hdfs-3_4_1-tests_jar-_-any-473444556536326838/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:12,823 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@2ed6b6c2{HTTP/1.1, (http/1.1)}{localhost:42069} 2024-11-18T22:43:12,824 INFO [Time-limited test {}] server.Server(415): Started @186623ms 2024-11-18T22:43:12,825 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:43:12,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:13,110 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:13,678 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:13,826 WARN [Thread-1227 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data1/current/BP-943418849-172.17.0.2-1731969791211/current, will proceed with Du for space computation calculation, 2024-11-18T22:43:13,826 WARN [Thread-1228 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data2/current/BP-943418849-172.17.0.2-1731969791211/current, will proceed with Du for space computation calculation, 2024-11-18T22:43:13,846 WARN [Thread-1191 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T22:43:13,849 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7a72c616347bfe19 with lease ID 0x108c61107ee2c2de: Processing first storage report for DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114 from datanode DatanodeRegistration(127.0.0.1:32993, datanodeUuid=f82ec9c5-3348-4999-82aa-fb9b16116667, infoPort=42067, infoSecurePort=0, ipcPort=41967, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211) 2024-11-18T22:43:13,849 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7a72c616347bfe19 with lease ID 0x108c61107ee2c2de: from storage DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114 node DatanodeRegistration(127.0.0.1:32993, datanodeUuid=f82ec9c5-3348-4999-82aa-fb9b16116667, infoPort=42067, infoSecurePort=0, ipcPort=41967, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:13,849 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7a72c616347bfe19 with lease ID 0x108c61107ee2c2de: Processing first storage report for DS-b4bc4303-92be-4861-aad0-a0d43acd929f from datanode DatanodeRegistration(127.0.0.1:32993, datanodeUuid=f82ec9c5-3348-4999-82aa-fb9b16116667, infoPort=42067, infoSecurePort=0, ipcPort=41967, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211) 2024-11-18T22:43:13,849 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7a72c616347bfe19 with lease ID 0x108c61107ee2c2de: from storage DS-b4bc4303-92be-4861-aad0-a0d43acd929f node DatanodeRegistration(127.0.0.1:32993, datanodeUuid=f82ec9c5-3348-4999-82aa-fb9b16116667, infoPort=42067, infoSecurePort=0, ipcPort=41967, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:13,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:14,111 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:14,163 WARN [Thread-1238 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data3/current/BP-943418849-172.17.0.2-1731969791211/current, will proceed with Du for space computation calculation, 2024-11-18T22:43:14,168 WARN [Thread-1239 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data4/current/BP-943418849-172.17.0.2-1731969791211/current, will proceed with Du for space computation calculation, 2024-11-18T22:43:14,213 WARN [Thread-1214 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:43:14,218 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x622a785107f135d8 with lease ID 0x108c61107ee2c2df: Processing first storage report for DS-daec3e77-2264-4d2a-8fef-13e79647b55a from datanode DatanodeRegistration(127.0.0.1:41305, datanodeUuid=0fe48884-7c43-4857-b711-5a2fa75e1401, infoPort=33323, infoSecurePort=0, ipcPort=34941, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211) 2024-11-18T22:43:14,218 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x622a785107f135d8 with lease ID 0x108c61107ee2c2df: from storage DS-daec3e77-2264-4d2a-8fef-13e79647b55a node DatanodeRegistration(127.0.0.1:41305, datanodeUuid=0fe48884-7c43-4857-b711-5a2fa75e1401, infoPort=33323, infoSecurePort=0, ipcPort=34941, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:14,218 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x622a785107f135d8 with lease ID 0x108c61107ee2c2df: Processing first storage report for DS-f95be902-70e7-4602-a263-ea84c52a9753 from datanode DatanodeRegistration(127.0.0.1:41305, datanodeUuid=0fe48884-7c43-4857-b711-5a2fa75e1401, infoPort=33323, infoSecurePort=0, ipcPort=34941, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211) 2024-11-18T22:43:14,218 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x622a785107f135d8 with lease ID 0x108c61107ee2c2df: from storage DS-f95be902-70e7-4602-a263-ea84c52a9753 node DatanodeRegistration(127.0.0.1:41305, datanodeUuid=0fe48884-7c43-4857-b711-5a2fa75e1401, infoPort=33323, infoSecurePort=0, ipcPort=34941, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:14,321 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2 2024-11-18T22:43:14,333 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/zookeeper_0, clientPort=54626, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T22:43:14,335 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54626 2024-11-18T22:43:14,335 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:14,337 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:14,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32993 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:43:14,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41305 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:43:14,361 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a with version=8 2024-11-18T22:43:14,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/hbase-staging 2024-11-18T22:43:14,364 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:43:14,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:14,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:14,364 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:43:14,365 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:14,365 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:43:14,365 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T22:43:14,365 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:43:14,366 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33747 2024-11-18T22:43:14,368 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33747 connecting to ZooKeeper ensemble=127.0.0.1:54626 2024-11-18T22:43:14,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:337470x0, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:43:14,444 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33747-0x101501dbcc70000 connected 2024-11-18T22:43:14,679 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:14,752 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:14,754 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:14,757 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:43:14,757 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a, hbase.cluster.distributed=false 2024-11-18T22:43:14,762 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:43:14,763 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33747 2024-11-18T22:43:14,763 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33747 2024-11-18T22:43:14,763 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33747 2024-11-18T22:43:14,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33747 2024-11-18T22:43:14,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33747 2024-11-18T22:43:14,791 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:43:14,792 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:14,792 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:14,792 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:43:14,792 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:14,792 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:43:14,792 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T22:43:14,792 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 
2024-11-18T22:43:14,800 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34791 2024-11-18T22:43:14,802 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34791 connecting to ZooKeeper ensemble=127.0.0.1:54626 2024-11-18T22:43:14,803 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:14,806 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:14,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:347910x0, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:43:14,826 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:347910x0, quorum=127.0.0.1:54626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:43:14,826 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34791-0x101501dbcc70001 connected 2024-11-18T22:43:14,826 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T22:43:14,832 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T22:43:14,837 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T22:43:14,839 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:43:14,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34791 2024-11-18T22:43:14,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34791 2024-11-18T22:43:14,856 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34791 2024-11-18T22:43:14,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34791 2024-11-18T22:43:14,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34791 2024-11-18T22:43:14,885 DEBUG [M:0;3b0ca91ed64a:33747 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3b0ca91ed64a:33747 2024-11-18T22:43:14,892 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3b0ca91ed64a,33747,1731969794364 2024-11-18T22:43:14,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:43:14,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:43:14,901 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3b0ca91ed64a,33747,1731969794364 2024-11-18T22:43:14,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T22:43:14,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:14,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:14,910 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T22:43:14,910 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3b0ca91ed64a,33747,1731969794364 from backup master directory 2024-11-18T22:43:14,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:43:14,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3b0ca91ed64a,33747,1731969794364 2024-11-18T22:43:14,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:43:14,924 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T22:43:14,924 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3b0ca91ed64a,33747,1731969794364 2024-11-18T22:43:14,931 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/hbase.id] with ID: c2210226-b54e-46e3-955f-89a4445480b6 2024-11-18T22:43:14,931 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/.tmp/hbase.id 2024-11-18T22:43:14,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32993 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:43:14,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41305 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:43:14,949 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/.tmp/hbase.id]:[hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/hbase.id] 2024-11-18T22:43:14,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:15,007 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:15,007 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T22:43:15,011 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 4ms. 2024-11-18T22:43:15,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:15,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:15,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32993 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:43:15,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41305 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:43:15,102 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:43:15,103 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T22:43:15,104 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:43:15,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:15,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32993 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:43:15,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41305 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:43:15,538 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store 2024-11-18T22:43:15,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32993 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:43:15,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41305 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:43:15,547 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:15,547 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:43:15,547 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:15,547 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:15,547 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-18T22:43:15,547 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:15,547 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:15,547 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969795547Disabling compacts and flushes for region at 1731969795547Disabling writes for close at 1731969795547Writing region close event to WAL at 1731969795547Closed at 1731969795547 2024-11-18T22:43:15,548 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/.initializing 2024-11-18T22:43:15,549 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364 2024-11-18T22:43:15,552 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C33747%2C1731969794364, suffix=, logDir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364, archiveDir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/oldWALs, maxLogs=10 2024-11-18T22:43:15,552 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 2024-11-18T22:43:15,560 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 2024-11-18T22:43:15,568 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42067:42067),(127.0.0.1/127.0.0.1:33323:33323)] 2024-11-18T22:43:15,573 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:43:15,573 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:15,573 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,573 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,574 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,576 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T22:43:15,576 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:15,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:15,577 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T22:43:15,579 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:15,579 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:43:15,580 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,581 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T22:43:15,581 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:15,582 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:43:15,582 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,584 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T22:43:15,584 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:15,585 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:43:15,585 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,586 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,586 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,589 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,589 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,590 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T22:43:15,591 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:15,596 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:43:15,597 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759116, jitterRate=-0.03473472595214844}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T22:43:15,598 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731969795573Initializing all the Stores at 1731969795574 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969795574Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969795574Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969795574Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969795574Cleaning up temporary data from old regions at 1731969795589 (+15 ms)Region opened successfully at 1731969795598 (+9 ms) 2024-11-18T22:43:15,598 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] 
region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T22:43:15,602 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ad0391f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:43:15,603 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T22:43:15,603 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T22:43:15,603 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T22:43:15,604 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T22:43:15,604 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T22:43:15,605 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T22:43:15,605 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T22:43:15,611 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T22:43:15,612 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T22:43:15,656 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T22:43:15,657 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T22:43:15,659 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T22:43:15,680 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:15,704 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T22:43:15,705 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T22:43:15,706 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T22:43:15,730 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T22:43:15,735 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T22:43:15,741 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T22:43:15,745 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T22:43:15,751 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T22:43:15,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:43:15,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:15,763 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3b0ca91ed64a,33747,1731969794364, sessionid=0x101501dbcc70000, setting cluster-up flag (Was=false) 2024-11-18T22:43:15,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:43:15,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:15,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:15,930 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T22:43:15,931 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,33747,1731969794364 2024-11-18T22:43:15,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:15,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:15,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:15,983 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T22:43:15,984 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,33747,1731969794364 2024-11-18T22:43:15,985 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T22:43:15,987 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T22:43:15,987 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T22:43:15,987 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-18T22:43:15,988 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3b0ca91ed64a,33747,1731969794364 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T22:43:15,989 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:43:15,989 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:43:15,989 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:43:15,990 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:43:15,990 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3b0ca91ed64a:0, corePoolSize=10, maxPoolSize=10 2024-11-18T22:43:15,990 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:15,990 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:43:15,990 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:15,991 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731969825991 2024-11-18T22:43:15,991 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T22:43:15,991 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T22:43:15,991 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T22:43:15,991 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T22:43:15,991 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T22:43:15,991 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T22:43:15,991 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:15,992 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T22:43:15,992 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T22:43:15,992 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T22:43:15,992 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T22:43:15,992 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T22:43:15,992 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:43:15,993 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T22:43:15,993 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969795992,5,FailOnTimeoutGroup] 2024-11-18T22:43:15,993 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969795993,5,FailOnTimeoutGroup] 2024-11-18T22:43:15,993 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:15,993 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T22:43:15,993 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:15,993 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-18T22:43:15,994 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:15,994 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T22:43:16,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32993 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:43:16,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41305 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:43:16,001 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T22:43:16,002 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a 2024-11-18T22:43:16,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32993 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:43:16,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41305 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:43:16,010 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:16,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:43:16,012 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:43:16,012 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:16,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:16,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:43:16,015 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:43:16,015 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:16,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:16,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:43:16,017 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:43:16,017 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:16,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:16,018 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:43:16,019 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:43:16,019 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:16,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:16,020 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:43:16,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740 2024-11-18T22:43:16,021 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740 2024-11-18T22:43:16,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:43:16,022 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:43:16,023 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T22:43:16,024 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:43:16,026 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:43:16,027 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=701375, jitterRate=-0.10815563797950745}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:43:16,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731969796010Initializing all the Stores at 1731969796010Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969796011 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969796011Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969796011Instantiating store for column family {NAME => 'table', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969796011Cleaning up temporary data from old regions at 1731969796022 (+11 ms)Region opened successfully at 1731969796028 (+6 ms) 2024-11-18T22:43:16,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:43:16,028 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:43:16,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:43:16,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:43:16,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:43:16,029 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:43:16,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969796028Disabling compacts and flushes for region at 1731969796028Disabling writes for close at 1731969796028Writing region close event to WAL at 1731969796029 (+1 ms)Closed at 1731969796029 2024-11-18T22:43:16,030 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:43:16,030 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T22:43:16,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T22:43:16,032 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:43:16,033 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T22:43:16,071 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(746): ClusterId : c2210226-b54e-46e3-955f-89a4445480b6 2024-11-18T22:43:16,071 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T22:43:16,078 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T22:43:16,078 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T22:43:16,089 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T22:43:16,089 DEBUG [RS:0;3b0ca91ed64a:34791 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b84819d, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:43:16,105 DEBUG [RS:0;3b0ca91ed64a:34791 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3b0ca91ed64a:34791 2024-11-18T22:43:16,105 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T22:43:16,105 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T22:43:16,105 DEBUG [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-18T22:43:16,106 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(2659): reportForDuty to master=3b0ca91ed64a,33747,1731969794364 with port=34791, startcode=1731969794791 2024-11-18T22:43:16,106 DEBUG [RS:0;3b0ca91ed64a:34791 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T22:43:16,108 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39327, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T22:43:16,109 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33747 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:16,109 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33747 {}] master.ServerManager(517): Registering regionserver=3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:16,110 DEBUG [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a 2024-11-18T22:43:16,111 DEBUG [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46163 2024-11-18T22:43:16,111 DEBUG [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T22:43:16,112 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:16,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:43:16,120 DEBUG [RS:0;3b0ca91ed64a:34791 {}] zookeeper.ZKUtil(111): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:16,120 WARN [RS:0;3b0ca91ed64a:34791 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T22:43:16,121 INFO [RS:0;3b0ca91ed64a:34791 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:43:16,121 DEBUG [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:16,121 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3b0ca91ed64a,34791,1731969794791] 2024-11-18T22:43:16,137 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T22:43:16,139 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T22:43:16,140 INFO [RS:0;3b0ca91ed64a:34791 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T22:43:16,140 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,144 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T22:43:16,149 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T22:43:16,149 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-18T22:43:16,149 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:16,149 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:43:16,150 DEBUG [RS:0;3b0ca91ed64a:34791 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:43:16,152 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,152 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,152 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,152 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-18T22:43:16,152 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,152 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,34791,1731969794791-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:43:16,171 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T22:43:16,171 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,34791,1731969794791-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,172 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,172 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.Replication(171): 3b0ca91ed64a,34791,1731969794791 started 2024-11-18T22:43:16,184 WARN [3b0ca91ed64a:33747 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T22:43:16,193 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,193 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(1482): Serving as 3b0ca91ed64a,34791,1731969794791, RpcServer on 3b0ca91ed64a/172.17.0.2:34791, sessionid=0x101501dbcc70001 2024-11-18T22:43:16,193 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T22:43:16,193 DEBUG [RS:0;3b0ca91ed64a:34791 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:16,193 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,34791,1731969794791' 2024-11-18T22:43:16,193 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T22:43:16,195 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T22:43:16,197 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T22:43:16,197 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T22:43:16,197 DEBUG [RS:0;3b0ca91ed64a:34791 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:16,197 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,34791,1731969794791' 2024-11-18T22:43:16,197 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T22:43:16,198 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T22:43:16,198 DEBUG [RS:0;3b0ca91ed64a:34791 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T22:43:16,198 INFO [RS:0;3b0ca91ed64a:34791 {}] 
quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T22:43:16,198 INFO [RS:0;3b0ca91ed64a:34791 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T22:43:16,309 INFO [RS:0;3b0ca91ed64a:34791 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C34791%2C1731969794791, suffix=, logDir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791, archiveDir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/oldWALs, maxLogs=32 2024-11-18T22:43:16,310 INFO [RS:0;3b0ca91ed64a:34791 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 2024-11-18T22:43:16,325 INFO [RS:0;3b0ca91ed64a:34791 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 2024-11-18T22:43:16,326 DEBUG [RS:0;3b0ca91ed64a:34791 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33323:33323),(127.0.0.1/127.0.0.1:42067:42067)] 2024-11-18T22:43:16,434 DEBUG [3b0ca91ed64a:33747 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T22:43:16,436 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:16,438 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,34791,1731969794791, state=OPENING 2024-11-18T22:43:16,447 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T22:43:16,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:16,457 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:43:16,459 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:16,460 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:43:16,460 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:43:16,460 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,34791,1731969794791}] 2024-11-18T22:43:16,614 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T22:43:16,617 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:49279, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T22:43:16,622 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T22:43:16,622 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:43:16,624 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C34791%2C1731969794791.meta, suffix=.meta, logDir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791, archiveDir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/oldWALs, maxLogs=32 2024-11-18T22:43:16,625 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969796625.meta 2024-11-18T22:43:16,643 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969796625.meta 2024-11-18T22:43:16,644 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33323:33323),(127.0.0.1/127.0.0.1:42067:42067)] 2024-11-18T22:43:16,645 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:43:16,645 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T22:43:16,645 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T22:43:16,645 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-18T22:43:16,645 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T22:43:16,645 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:16,645 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T22:43:16,645 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T22:43:16,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:43:16,648 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:43:16,648 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:16,649 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:16,649 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:43:16,651 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:43:16,651 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:16,651 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:16,652 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:43:16,653 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:43:16,653 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:16,654 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:16,654 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:43:16,655 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:43:16,655 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:16,657 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T22:43:16,657 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:43:16,658 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740 2024-11-18T22:43:16,660 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740 2024-11-18T22:43:16,662 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:43:16,662 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:43:16,663 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T22:43:16,665 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:43:16,666 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772917, jitterRate=-0.017185688018798828}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:43:16,667 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T22:43:16,668 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731969796646Writing region info on filesystem at 1731969796646Initializing all the Stores at 1731969796646Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969796647 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969796647Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969796647Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969796647Cleaning up temporary data from old regions at 1731969796662 (+15 ms)Running coprocessor post-open hooks at 1731969796667 (+5 ms)Region opened successfully at 1731969796668 (+1 ms) 2024-11-18T22:43:16,669 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731969796614 2024-11-18T22:43:16,674 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T22:43:16,674 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T22:43:16,676 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:16,677 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,34791,1731969794791, state=OPEN 2024-11-18T22:43:16,681 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:16,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:43:16,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:43:16,721 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:16,722 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:43:16,722 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:43:16,726 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T22:43:16,726 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,34791,1731969794791 in 262 msec 2024-11-18T22:43:16,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T22:43:16,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 696 msec 2024-11-18T22:43:16,730 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:43:16,731 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T22:43:16,732 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:43:16,732 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,34791,1731969794791, seqNum=-1] 2024-11-18T22:43:16,732 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:43:16,734 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55023, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:43:16,743 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 755 msec 2024-11-18T22:43:16,743 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to 
report in: status=status unset, state=RUNNING, startTime=1731969796743, completionTime=-1 2024-11-18T22:43:16,743 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T22:43:16,743 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T22:43:16,745 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T22:43:16,745 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731969856745 2024-11-18T22:43:16,746 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731969916746 2024-11-18T22:43:16,746 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T22:43:16,746 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,33747,1731969794364-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,746 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,33747,1731969794364-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,746 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,33747,1731969794364-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,747 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3b0ca91ed64a:33747, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,747 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,747 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,750 DEBUG [master/3b0ca91ed64a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T22:43:16,752 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.827sec 2024-11-18T22:43:16,752 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T22:43:16,753 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T22:43:16,753 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T22:43:16,753 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-18T22:43:16,753 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T22:43:16,753 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,33747,1731969794364-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:43:16,753 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,33747,1731969794364-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T22:43:16,756 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T22:43:16,757 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T22:43:16,757 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,33747,1731969794364-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:16,776 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@641dfeb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:43:16,776 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3b0ca91ed64a,33747,-1 for getting cluster id 2024-11-18T22:43:16,776 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T22:43:16,795 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c2210226-b54e-46e3-955f-89a4445480b6' 2024-11-18T22:43:16,796 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T22:43:16,797 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c2210226-b54e-46e3-955f-89a4445480b6" 2024-11-18T22:43:16,797 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48266157, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:43:16,797 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3b0ca91ed64a,33747,-1] 2024-11-18T22:43:16,797 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T22:43:16,800 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:43:16,803 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36582, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T22:43:16,805 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35afad91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:43:16,807 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:43:16,813 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,34791,1731969794791, seqNum=-1] 2024-11-18T22:43:16,813 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:43:16,818 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32850, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:43:16,821 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3b0ca91ed64a,33747,1731969794364 2024-11-18T22:43:16,822 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:16,826 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T22:43:16,826 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-18T22:43:16,826 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-18T22:43:16,827 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T22:43:16,828 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 3b0ca91ed64a,33747,1731969794364 2024-11-18T22:43:16,828 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7750193e 2024-11-18T22:43:16,829 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T22:43:16,831 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36596, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T22:43:16,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33747 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T22:43:16,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33747 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-18T22:43:16,833 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33747 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:43:16,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33747 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T22:43:16,836 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T22:43:16,836 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:16,836 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33747 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-18T22:43:16,838 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T22:43:16,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33747 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T22:43:16,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41305 is added to blk_1073741835_1011 (size=395) 2024-11-18T22:43:16,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32993 is added to blk_1073741835_1011 (size=395) 2024-11-18T22:43:16,871 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b95a13808560ce383c27ab6cb408cb9b, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a 2024-11-18T22:43:16,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41305 is added to blk_1073741836_1012 (size=78) 2024-11-18T22:43:16,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32993 is added to blk_1073741836_1012 (size=78) 2024-11-18T22:43:16,970 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:17,113 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:17,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T22:43:17,235 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T22:43:17,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:43:17,236 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T22:43:17,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T22:43:17,236 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-18T22:43:17,292 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:17,292 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing b95a13808560ce383c27ab6cb408cb9b, disabling compactions & flushes 2024-11-18T22:43:17,293 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 2024-11-18T22:43:17,293 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 
2024-11-18T22:43:17,293 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. after waiting 0 ms 2024-11-18T22:43:17,293 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 2024-11-18T22:43:17,293 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 2024-11-18T22:43:17,293 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for b95a13808560ce383c27ab6cb408cb9b: Waiting for close lock at 1731969797292Disabling compacts and flushes for region at 1731969797292Disabling writes for close at 1731969797293 (+1 ms)Writing region close event to WAL at 1731969797293Closed at 1731969797293 2024-11-18T22:43:17,297 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T22:43:17,297 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731969797297"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731969797297"}]},"ts":"1731969797297"} 2024-11-18T22:43:17,301 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-18T22:43:17,303 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T22:43:17,304 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731969797304"}]},"ts":"1731969797304"} 2024-11-18T22:43:17,313 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-18T22:43:17,313 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b95a13808560ce383c27ab6cb408cb9b, ASSIGN}] 2024-11-18T22:43:17,317 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b95a13808560ce383c27ab6cb408cb9b, ASSIGN 2024-11-18T22:43:17,319 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b95a13808560ce383c27ab6cb408cb9b, ASSIGN; state=OFFLINE, location=3b0ca91ed64a,34791,1731969794791; forceNewPlan=false, retain=false 2024-11-18T22:43:17,471 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b95a13808560ce383c27ab6cb408cb9b, regionState=OPENING, regionLocation=3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:17,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b95a13808560ce383c27ab6cb408cb9b, ASSIGN because future has completed 2024-11-18T22:43:17,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b95a13808560ce383c27ab6cb408cb9b, server=3b0ca91ed64a,34791,1731969794791}] 2024-11-18T22:43:17,634 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 
2024-11-18T22:43:17,634 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b95a13808560ce383c27ab6cb408cb9b, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:43:17,634 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:17,635 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:17,635 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:17,635 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:17,637 INFO [StoreOpener-b95a13808560ce383c27ab6cb408cb9b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:17,642 INFO [StoreOpener-b95a13808560ce383c27ab6cb408cb9b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b95a13808560ce383c27ab6cb408cb9b columnFamilyName info 2024-11-18T22:43:17,643 DEBUG [StoreOpener-b95a13808560ce383c27ab6cb408cb9b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:17,645 INFO [StoreOpener-b95a13808560ce383c27ab6cb408cb9b-1 {}] regionserver.HStore(327): Store=b95a13808560ce383c27ab6cb408cb9b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:43:17,645 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:17,647 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/default/TestLogRolling-testLogRollOnPipelineRestart/b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:17,647 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/default/TestLogRolling-testLogRollOnPipelineRestart/b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:17,648 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:17,648 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:17,650 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:17,655 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/default/TestLogRolling-testLogRollOnPipelineRestart/b95a13808560ce383c27ab6cb408cb9b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:43:17,656 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b95a13808560ce383c27ab6cb408cb9b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=749650, jitterRate=-0.04677128791809082}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T22:43:17,656 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:17,657 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b95a13808560ce383c27ab6cb408cb9b: Running coprocessor pre-open hook at 1731969797635Writing region info on filesystem at 1731969797635Initializing all the Stores at 1731969797636 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969797636Cleaning up temporary data from old regions at 1731969797648 (+12 ms)Running coprocessor post-open hooks at 1731969797656 (+8 ms)Region opened successfully at 1731969797657 (+1 ms) 2024-11-18T22:43:17,658 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b., pid=6, masterSystemTime=1731969797629 2024-11-18T22:43:17,660 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 2024-11-18T22:43:17,661 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 2024-11-18T22:43:17,662 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b95a13808560ce383c27ab6cb408cb9b, regionState=OPEN, openSeqNum=2, regionLocation=3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:17,664 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b95a13808560ce383c27ab6cb408cb9b, server=3b0ca91ed64a,34791,1731969794791 because future has completed 2024-11-18T22:43:17,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T22:43:17,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b95a13808560ce383c27ab6cb408cb9b, server=3b0ca91ed64a,34791,1731969794791 in 191 msec 2024-11-18T22:43:17,673 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T22:43:17,673 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=b95a13808560ce383c27ab6cb408cb9b, ASSIGN in 356 msec 2024-11-18T22:43:17,675 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T22:43:17,675 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731969797675"}]},"ts":"1731969797675"} 2024-11-18T22:43:17,677 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-18T22:43:17,679 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T22:43:17,682 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 846 msec 2024-11-18T22:43:17,684 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:17,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:18,114 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:18,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:18,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:19,114 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:19,685 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:19,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:20,115 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:20,686 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:20,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:21,116 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:21,687 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:21,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:22,116 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:22,148 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:43:22,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:22,174 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:22,174 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:22,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:22,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:22,175 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:22,181 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:22,184 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:22,191 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T22:43:22,192 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-18T22:43:22,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:22,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:23,118 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:23,688 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:23,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:24,119 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:24,689 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:24,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:25,120 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:25,690 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:25,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:26,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:26,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:26,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33747 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T22:43:26,914 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-18T22:43:26,914 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-18T22:43:26,918 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T22:43:26,918 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 2024-11-18T22:43:26,924 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b., hostname=3b0ca91ed64a,34791,1731969794791, seqNum=2] 2024-11-18T22:43:26,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:27,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:27,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T22:43:27,235 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-18T22:43:27,691 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:27,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:28,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:28,692 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:28,928 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 2024-11-18T22:43:28,930 WARN [ResponseProcessor for block BP-943418849-172.17.0.2-1731969791211:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-943418849-172.17.0.2-1731969791211:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:28,930 WARN [ResponseProcessor for block BP-943418849-172.17.0.2-1731969791211:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-943418849-172.17.0.2-1731969791211:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-943418849-172.17.0.2-1731969791211:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:41305,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:28,931 WARN [DataStreamer for file /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969796625.meta block BP-943418849-172.17.0.2-1731969791211:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-943418849-172.17.0.2-1731969791211:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41305,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK], DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41305,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK]) is bad. 2024-11-18T22:43:28,931 WARN [DataStreamer for file /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 block BP-943418849-172.17.0.2-1731969791211:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-943418849-172.17.0.2-1731969791211:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK], DatanodeInfoWithStorage[127.0.0.1:41305,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41305,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK]) is bad. 2024-11-18T22:43:28,931 WARN [PacketResponder: BP-943418849-172.17.0.2-1731969791211:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41305] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:28,929 WARN [ResponseProcessor for block BP-943418849-172.17.0.2-1731969791211:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-943418849-172.17.0.2-1731969791211:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:28,931 WARN [DataStreamer for file /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 block BP-943418849-172.17.0.2-1731969791211:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-943418849-172.17.0.2-1731969791211:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41305,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK], DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41305,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK]) is bad. 2024-11-18T22:43:28,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1920034213_22 at /127.0.0.1:58826 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:32993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58826 dst: /127.0.0.1:32993 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:43:28,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_28316604_22 at /127.0.0.1:58850 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:32993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58850 dst: /127.0.0.1:32993 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:28,931 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_28316604_22 at /127.0.0.1:54762 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54762 dst: /127.0.0.1:41305 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:28,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_28316604_22 at /127.0.0.1:54750 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54750 dst: /127.0.0.1:41305 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:43:28,932 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1920034213_22 at /127.0.0.1:54728 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41305:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54728 dst: /127.0.0.1:41305 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:28,933 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_28316604_22 at /127.0.0.1:58842 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58842 dst: /127.0.0.1:32993 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:28,968 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@107ec413{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:28,969 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ed6b6c2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:43:28,969 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:43:28,969 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ffbb97b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:43:28,969 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@669883e5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,STOPPED} 2024-11-18T22:43:28,970 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:43:28,970 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:43:28,970 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-943418849-172.17.0.2-1731969791211 (Datanode Uuid 0fe48884-7c43-4857-b711-5a2fa75e1401) service to localhost/127.0.0.1:46163 2024-11-18T22:43:28,970 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:43:28,971 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data3/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:28,971 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data4/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:28,971 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:43:28,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:28,985 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:28,988 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:43:28,989 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:43:28,989 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:43:28,990 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:43:28,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10b3b4b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:43:28,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b07903e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:43:29,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:29,156 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24cc63e3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/java.io.tmpdir/jetty-localhost-41733-hadoop-hdfs-3_4_1-tests_jar-_-any-16489903746259758829/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:29,157 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34cfd6a7{HTTP/1.1, (http/1.1)}{localhost:41733} 2024-11-18T22:43:29,157 INFO [Time-limited test {}] server.Server(415): Started @202957ms 2024-11-18T22:43:29,159 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:43:29,248 WARN [ResponseProcessor for block BP-943418849-172.17.0.2-1731969791211:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-943418849-172.17.0.2-1731969791211:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:29,249 WARN [ResponseProcessor for block BP-943418849-172.17.0.2-1731969791211:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-943418849-172.17.0.2-1731969791211:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:29,249 WARN [ResponseProcessor for block BP-943418849-172.17.0.2-1731969791211:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-943418849-172.17.0.2-1731969791211:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:43:29,249 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1920034213_22 at /127.0.0.1:56980 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:32993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56980 dst: /127.0.0.1:32993 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:29,249 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_28316604_22 at /127.0.0.1:56960 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:32993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56960 dst: /127.0.0.1:32993 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:29,250 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_28316604_22 at /127.0.0.1:56970 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:32993:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56970 dst: /127.0.0.1:32993 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:29,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@347323d1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:29,262 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@438ec950{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:43:29,262 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:43:29,262 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@52bf6dde{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:43:29,262 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77432f4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,STOPPED} 2024-11-18T22:43:29,273 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:43:29,273 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:43:29,273 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-943418849-172.17.0.2-1731969791211 (Datanode Uuid f82ec9c5-3348-4999-82aa-fb9b16116667) service to localhost/127.0.0.1:46163 2024-11-18T22:43:29,273 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:43:29,274 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data1/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:29,274 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data2/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:29,274 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:43:29,292 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:29,300 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:43:29,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:43:29,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:43:29,310 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:43:29,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74025bad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:43:29,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56983df4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:43:29,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6baac986{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/java.io.tmpdir/jetty-localhost-34769-hadoop-hdfs-3_4_1-tests_jar-_-any-17058071665926104107/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:29,457 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c130819{HTTP/1.1, (http/1.1)}{localhost:34769} 2024-11-18T22:43:29,457 INFO [Time-limited test {}] server.Server(415): Started @203257ms 2024-11-18T22:43:29,458 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:43:29,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:29,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:30,025 WARN [Thread-1362 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:43:30,032 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb68c4d34a298476a with lease ID 0x108c61107ee2c2e0: from storage DS-daec3e77-2264-4d2a-8fef-13e79647b55a node DatanodeRegistration(127.0.0.1:38915, datanodeUuid=0fe48884-7c43-4857-b711-5a2fa75e1401, infoPort=42293, infoSecurePort=0, ipcPort=44815, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:30,032 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb68c4d34a298476a with lease ID 0x108c61107ee2c2e0: from storage DS-f95be902-70e7-4602-a263-ea84c52a9753 node DatanodeRegistration(127.0.0.1:38915, datanodeUuid=0fe48884-7c43-4857-b711-5a2fa75e1401, infoPort=42293, infoSecurePort=0, ipcPort=44815, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:30,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:30,302 WARN [Thread-1382 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:43:30,305 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xda3d2bcc8f60ecd3 with lease ID 0x108c61107ee2c2e1: from storage DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114 node DatanodeRegistration(127.0.0.1:41591, datanodeUuid=f82ec9c5-3348-4999-82aa-fb9b16116667, infoPort=34549, infoSecurePort=0, ipcPort=36357, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T22:43:30,305 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xda3d2bcc8f60ecd3 with lease ID 0x108c61107ee2c2e1: from storage DS-b4bc4303-92be-4861-aad0-a0d43acd929f node DatanodeRegistration(127.0.0.1:41591, datanodeUuid=f82ec9c5-3348-4999-82aa-fb9b16116667, infoPort=34549, infoSecurePort=0, ipcPort=36357, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:30,517 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-18T22:43:30,520 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-18T22:43:30,521 ERROR [FSHLog-0-hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a-prefix:3b0ca91ed64a,34791,1731969794791 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:30,521 WARN [FSHLog-0-hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a-prefix:3b0ca91ed64a,34791,1731969794791 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:43:30,521 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C34791%2C1731969794791:(num 1731969796310) roll requested 2024-11-18T22:43:30,522 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 2024-11-18T22:43:30,527 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 newFile=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 2024-11-18T22:43:30,527 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:30,527 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:30,527 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:30,528 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:30,528 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:30,528 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 2024-11-18T22:43:30,529 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:30,529 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:43:30,529 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 2024-11-18T22:43:30,530 WARN [IPC Server handler 2 on default port 46163 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-18T22:43:30,530 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42293:42293),(127.0.0.1/127.0.0.1:34549:34549)] 2024-11-18T22:43:30,530 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 is not closed yet, will try archiving it next time 2024-11-18T22:43:30,530 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 after 1ms 2024-11-18T22:43:30,693 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:30,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:31,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:31,694 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:31,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:32,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:32,536 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-18T22:43:32,696 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:32,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:33,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:33,697 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:33,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:34,034 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T22:43:34,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:34,531 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 after 4002ms 2024-11-18T22:43:34,557 WARN [ResponseProcessor for block BP-943418849-172.17.0.2-1731969791211:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-943418849-172.17.0.2-1731969791211:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-943418849-172.17.0.2-1731969791211:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:41591,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:34,557 WARN [DataStreamer for file /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 block BP-943418849-172.17.0.2-1731969791211:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-943418849-172.17.0.2-1731969791211:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38915,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK], DatanodeInfoWithStorage[127.0.0.1:41591,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41591,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]) is bad. 2024-11-18T22:43:34,557 WARN [PacketResponder: BP-943418849-172.17.0.2-1731969791211:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41591] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:43:34,561 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_28316604_22 at /127.0.0.1:58002 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58002 dst: /127.0.0.1:38915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:34,561 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_28316604_22 at /127.0.0.1:59542 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:41591:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59542 dst: /127.0.0.1:41591 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:34,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6baac986{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:34,587 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c130819{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:43:34,587 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:43:34,587 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56983df4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:43:34,588 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74025bad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,STOPPED} 2024-11-18T22:43:34,603 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:43:34,603 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:43:34,603 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-943418849-172.17.0.2-1731969791211 (Datanode Uuid f82ec9c5-3348-4999-82aa-fb9b16116667) service to localhost/127.0.0.1:46163 2024-11-18T22:43:34,603 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:43:34,604 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data1/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:34,604 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data2/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:34,605 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:43:34,621 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:34,626 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:43:34,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:43:34,628 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:43:34,628 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:43:34,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@727fa385{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:43:34,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65accf8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:43:34,698 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:34,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5efc28c8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/java.io.tmpdir/jetty-localhost-45821-hadoop-hdfs-3_4_1-tests_jar-_-any-5116181701908527746/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:34,764 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75d818fe{HTTP/1.1, (http/1.1)}{localhost:45821} 2024-11-18T22:43:34,765 INFO [Time-limited test {}] server.Server(415): Started @208565ms 2024-11-18T22:43:34,766 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:43:34,830 WARN [ResponseProcessor for block BP-943418849-172.17.0.2-1731969791211:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-943418849-172.17.0.2-1731969791211:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:43:34,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_28316604_22 at /127.0.0.1:53522 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:38915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53522 dst: /127.0.0.1:38915 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-18T22:43:34,871 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24cc63e3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:34,872 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34cfd6a7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:43:34,872 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:43:34,872 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b07903e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:43:34,872 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10b3b4b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,STOPPED} 2024-11-18T22:43:34,877 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:43:34,877 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T22:43:34,877 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-943418849-172.17.0.2-1731969791211 (Datanode Uuid 0fe48884-7c43-4857-b711-5a2fa75e1401) service to localhost/127.0.0.1:46163 2024-11-18T22:43:34,877 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:43:34,878 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data3/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:34,878 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data4/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:34,878 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:43:34,890 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:34,896 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:43:34,900 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:43:34,901 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:43:34,901 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:43:34,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6edb8d2f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:43:34,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5295748e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:43:34,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:35,038 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4e65fed6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/java.io.tmpdir/jetty-localhost-42283-hadoop-hdfs-3_4_1-tests_jar-_-any-2514002140635200218/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:35,040 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e0f5c1e{HTTP/1.1, (http/1.1)}{localhost:42283} 2024-11-18T22:43:35,040 INFO [Time-limited test {}] server.Server(415): Started @208840ms 2024-11-18T22:43:35,043 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:43:35,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:35,699 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:35,722 WARN [Thread-1436 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:43:35,727 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x13c8e06ba2ffae6 with lease ID 0x108c61107ee2c2e2: from storage DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114 node DatanodeRegistration(127.0.0.1:44141, datanodeUuid=f82ec9c5-3348-4999-82aa-fb9b16116667, infoPort=45259, infoSecurePort=0, ipcPort=34527, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:35,727 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x13c8e06ba2ffae6 with lease ID 0x108c61107ee2c2e2: from storage DS-b4bc4303-92be-4861-aad0-a0d43acd929f node DatanodeRegistration(127.0.0.1:44141, datanodeUuid=f82ec9c5-3348-4999-82aa-fb9b16116667, infoPort=45259, infoSecurePort=0, ipcPort=34527, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:35,847 WARN [Thread-1456 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T22:43:35,850 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8753fca79439c574 with lease ID 0x108c61107ee2c2e3: from storage DS-daec3e77-2264-4d2a-8fef-13e79647b55a node DatanodeRegistration(127.0.0.1:35221, datanodeUuid=0fe48884-7c43-4857-b711-5a2fa75e1401, infoPort=38759, infoSecurePort=0, ipcPort=37875, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:35,850 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8753fca79439c574 with lease ID 0x108c61107ee2c2e3: from storage DS-f95be902-70e7-4602-a263-ea84c52a9753 node DatanodeRegistration(127.0.0.1:35221, datanodeUuid=0fe48884-7c43-4857-b711-5a2fa75e1401, infoPort=38759, infoSecurePort=0, ipcPort=37875, storageInfo=lv=-57;cid=testClusterID;nsid=904663137;c=1731969791211), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:35,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:36,109 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-18T22:43:36,112 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-18T22:43:36,114 ERROR [FSHLog-0-hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a-prefix:3b0ca91ed64a,34791,1731969794791 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38915,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:36,114 WARN [FSHLog-0-hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a-prefix:3b0ca91ed64a,34791,1731969794791 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38915,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:43:36,114 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C34791%2C1731969794791:(num 1731969810521) roll requested 2024-11-18T22:43:36,115 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C34791%2C1731969794791.1731969816115 2024-11-18T22:43:36,130 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 newFile=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969816115 2024-11-18T22:43:36,130 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:36,130 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:36,130 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:36,130 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:36,130 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:36,131 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969816115 2024-11-18T22:43:36,131 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38915,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:36,131 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38915,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:43:36,131 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 2024-11-18T22:43:36,132 WARN [IPC Server handler 2 on default port 46163 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-18T22:43:36,132 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 after 1ms 2024-11-18T22:43:36,136 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38759:38759),(127.0.0.1/127.0.0.1:45259:45259)] 2024-11-18T22:43:36,137 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 is not closed yet, will try archiving it next time 2024-11-18T22:43:36,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:36,700 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:36,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:37,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:37,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:37,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:38,141 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 2024-11-18T22:43:38,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:38,201 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969816115 newFile=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 2024-11-18T22:43:38,201 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:38,202 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:38,204 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:38,204 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:38,205 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:38,205 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969816115 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 2024-11-18T22:43:38,212 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38759:38759),(127.0.0.1/127.0.0.1:45259:45259)] 2024-11-18T22:43:38,212 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 is not closed yet, will try archiving it next time 2024-11-18T22:43:38,212 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969816115 is not closed yet, will try archiving it next time 2024-11-18T22:43:38,213 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 2024-11-18T22:43:38,213 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 2024-11-18T22:43:38,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741838_1019 (size=1264) 2024-11-18T22:43:38,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added 
to blk_1073741838_1019 (size=1264) 2024-11-18T22:43:38,218 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 after 5ms 2024-11-18T22:43:38,218 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 2024-11-18T22:43:38,266 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731969797657/Put/vlen=218/seqid=0] 2024-11-18T22:43:38,266 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731969806926/Put/vlen=1045/seqid=0] 2024-11-18T22:43:38,266 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969796310 2024-11-18T22:43:38,266 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 2024-11-18T22:43:38,266 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 2024-11-18T22:43:38,267 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 after 1ms 2024-11-18T22:43:38,267 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 2024-11-18T22:43:38,272 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731969810521/Put/vlen=1045/seqid=0] 2024-11-18T22:43:38,272 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731969812538/Put/vlen=1045/seqid=0] 2024-11-18T22:43:38,272 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 2024-11-18T22:43:38,272 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969816115 2024-11-18T22:43:38,272 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969816115 2024-11-18T22:43:38,276 WARN [IPC Server handler 4 on default port 46163 {}] namenode.FSNamesystem(3730): BLOCK* internalReleaseLease: All existing blocks are COMPLETE, lease removed, file 
/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969816115 closed. 2024-11-18T22:43:38,280 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969816115 after 8ms 2024-11-18T22:43:38,280 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969816115 2024-11-18T22:43:38,289 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731969816114/Put/vlen=1045/seqid=0] 2024-11-18T22:43:38,289 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 2024-11-18T22:43:38,289 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 2024-11-18T22:43:38,290 WARN [IPC Server handler 3 on default port 46163 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-18T22:43:38,290 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 after 1ms 2024-11-18T22:43:38,619 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 is not closed yet, will try archiving it next time 2024-11-18T22:43:38,701 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:38,851 WARN [ResponseProcessor for block BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:38,851 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1920034213_22 at /127.0.0.1:42282 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:35221:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42282 dst: /127.0.0.1:35221 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:35221 remote=/127.0.0.1:42282]. Total timeout mills is 60000, 59345 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:38,851 WARN [DataStreamer for file /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 block BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35221,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK], DatanodeInfoWithStorage[127.0.0.1:44141,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35221,DS-daec3e77-2264-4d2a-8fef-13e79647b55a,DISK]) is bad. 2024-11-18T22:43:38,851 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1920034213_22 at /127.0.0.1:59596 [Receiving block BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44141:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59596 dst: /127.0.0.1:44141 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:43:38,853 WARN [DataStreamer for file /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 block BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:38,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741839_1022 (size=85) 2024-11-18T22:43:38,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741839_1022 (size=85) 2024-11-18T22:43:38,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:39,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:39,702 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:39,733 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T22:43:39,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:40,133 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969810521 after 4002ms 2024-11-18T22:43:40,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:40,703 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:40,990 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:41,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:41,704 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:41,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:42,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:42,291 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 after 4002ms 2024-11-18T22:43:42,291 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 2024-11-18T22:43:42,296 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 2024-11-18T22:43:42,296 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-18T22:43:42,297 ERROR [FSHLog-0-hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a-prefix:3b0ca91ed64a,34791,1731969794791.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:42,297 WARN [FSHLog-0-hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a-prefix:3b0ca91ed64a,34791,1731969794791.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:43:42,297 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C34791%2C1731969794791.meta:.meta(num 1731969796625) roll requested 2024-11-18T22:43:42,298 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969822297.meta 2024-11-18T22:43:42,309 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,312 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,312 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,312 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,316 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,316 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969796625.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969822297.meta 2024-11-18T22:43:42,317 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:42,317 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:43:42,318 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969796625.meta 2024-11-18T22:43:42,318 WARN [IPC Server handler 3 on default port 46163 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969796625.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1013 2024-11-18T22:43:42,319 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969796625.meta after 1ms 2024-11-18T22:43:42,327 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38759:38759),(127.0.0.1/127.0.0.1:45259:45259)] 2024-11-18T22:43:42,327 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969796625.meta is not closed yet, will try archiving it next time 2024-11-18T22:43:42,367 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/.tmp/info/e1ac234a225c4f99bc2dcd2c96cb7697 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b./info:regioninfo/1731969797661/Put/seqid=0 2024-11-18T22:43:42,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741841_1025 (size=7125) 2024-11-18T22:43:42,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741841_1025 (size=7125) 2024-11-18T22:43:42,379 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/.tmp/info/e1ac234a225c4f99bc2dcd2c96cb7697 2024-11-18T22:43:42,408 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/.tmp/ns/613c4605a2d346e2a3b11059add576a5 is 43, key is default/ns:d/1731969796735/Put/seqid=0 2024-11-18T22:43:42,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741842_1026 (size=5153) 2024-11-18T22:43:42,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741842_1026 (size=5153) 2024-11-18T22:43:42,418 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/.tmp/ns/613c4605a2d346e2a3b11059add576a5 2024-11-18T22:43:42,458 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/.tmp/table/894e20b780ab4121b46fb53625b5ec2c is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731969797675/Put/seqid=0 2024-11-18T22:43:42,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741843_1027 (size=5438) 2024-11-18T22:43:42,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741843_1027 (size=5438) 2024-11-18T22:43:42,484 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/.tmp/table/894e20b780ab4121b46fb53625b5ec2c 2024-11-18T22:43:42,496 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/.tmp/info/e1ac234a225c4f99bc2dcd2c96cb7697 as hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/info/e1ac234a225c4f99bc2dcd2c96cb7697 2024-11-18T22:43:42,503 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/info/e1ac234a225c4f99bc2dcd2c96cb7697, entries=10, sequenceid=11, filesize=7.0 K 2024-11-18T22:43:42,505 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/.tmp/ns/613c4605a2d346e2a3b11059add576a5 as hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/ns/613c4605a2d346e2a3b11059add576a5 2024-11-18T22:43:42,515 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/ns/613c4605a2d346e2a3b11059add576a5, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T22:43:42,517 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/.tmp/table/894e20b780ab4121b46fb53625b5ec2c as hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/table/894e20b780ab4121b46fb53625b5ec2c 2024-11-18T22:43:42,531 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/table/894e20b780ab4121b46fb53625b5ec2c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-18T22:43:42,532 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 236ms, sequenceid=11, compaction requested=false 2024-11-18T22:43:42,533 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T22:43:42,533 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing b95a13808560ce383c27ab6cb408cb9b 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-18T22:43:42,533 ERROR [FSHLog-0-hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a-prefix:3b0ca91ed64a,34791,1731969794791 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:42,534 WARN [FSHLog-0-hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a-prefix:3b0ca91ed64a,34791,1731969794791 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:43:42,540 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C34791%2C1731969794791:(num 1731969818140) roll requested 2024-11-18T22:43:42,540 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C34791%2C1731969794791.1731969822540 2024-11-18T22:43:42,583 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 newFile=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969822540 2024-11-18T22:43:42,583 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,584 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,585 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,588 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,591 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,592 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969822540 2024-11-18T22:43:42,592 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:42,593 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-943418849-172.17.0.2-1731969791211:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor190.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:42,593 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 2024-11-18T22:43:42,603 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 after 9ms 2024-11-18T22:43:42,603 DEBUG [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38759:38759),(127.0.0.1/127.0.0.1:45259:45259)] 2024-11-18T22:43:42,603 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 to hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/oldWALs/3b0ca91ed64a%2C34791%2C1731969794791.1731969818140 2024-11-18T22:43:42,648 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/default/TestLogRolling-testLogRollOnPipelineRestart/b95a13808560ce383c27ab6cb408cb9b/.tmp/info/7ab1d848093242daaf542db42daac059 is 1080, key is row1002/info:/1731969806926/Put/seqid=0 2024-11-18T22:43:42,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741845_1029 (size=9270) 2024-11-18T22:43:42,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741845_1029 (size=9270) 2024-11-18T22:43:42,674 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/default/TestLogRolling-testLogRollOnPipelineRestart/b95a13808560ce383c27ab6cb408cb9b/.tmp/info/7ab1d848093242daaf542db42daac059 2024-11-18T22:43:42,686 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/default/TestLogRolling-testLogRollOnPipelineRestart/b95a13808560ce383c27ab6cb408cb9b/.tmp/info/7ab1d848093242daaf542db42daac059 as hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/default/TestLogRolling-testLogRollOnPipelineRestart/b95a13808560ce383c27ab6cb408cb9b/info/7ab1d848093242daaf542db42daac059 2024-11-18T22:43:42,705 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:42,709 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/default/TestLogRolling-testLogRollOnPipelineRestart/b95a13808560ce383c27ab6cb408cb9b/info/7ab1d848093242daaf542db42daac059, entries=4, sequenceid=8, filesize=9.1 K 2024-11-18T22:43:42,710 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for b95a13808560ce383c27ab6cb408cb9b in 177ms, sequenceid=8, compaction requested=false 2024-11-18T22:43:42,710 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for b95a13808560ce383c27ab6cb408cb9b: 2024-11-18T22:43:42,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T22:43:42,720 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T22:43:42,720 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:43:42,720 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:43:42,721 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:43:42,721 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T22:43:42,721 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T22:43:42,722 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=87887211, stopped=false 2024-11-18T22:43:42,722 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3b0ca91ed64a,33747,1731969794364 2024-11-18T22:43:42,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:43:42,749 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:43:42,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:42,749 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T22:43:42,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:43:42,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:42,749 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:43:42,749 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:43:42,750 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3b0ca91ed64a,34791,1731969794791' ***** 2024-11-18T22:43:42,750 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T22:43:42,750 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T22:43:42,750 INFO [RS:0;3b0ca91ed64a:34791 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T22:43:42,750 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T22:43:42,750 INFO [RS:0;3b0ca91ed64a:34791 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T22:43:42,750 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(3091): Received CLOSE for b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:42,751 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(959): stopping server 3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:42,751 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:43:42,751 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:43:42,751 INFO [RS:0;3b0ca91ed64a:34791 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3b0ca91ed64a:34791. 
2024-11-18T22:43:42,751 DEBUG [RS:0;3b0ca91ed64a:34791 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:43:42,751 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b95a13808560ce383c27ab6cb408cb9b, disabling compactions & flushes 2024-11-18T22:43:42,751 DEBUG [RS:0;3b0ca91ed64a:34791 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:43:42,751 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 2024-11-18T22:43:42,751 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 2024-11-18T22:43:42,751 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T22:43:42,751 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. after waiting 0 ms 2024-11-18T22:43:42,751 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T22:43:42,751 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T22:43:42,751 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T22:43:42,751 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 
2024-11-18T22:43:42,751 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T22:43:42,751 DEBUG [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, b95a13808560ce383c27ab6cb408cb9b=TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b.} 2024-11-18T22:43:42,751 DEBUG [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b95a13808560ce383c27ab6cb408cb9b 2024-11-18T22:43:42,752 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:43:42,752 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:43:42,752 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:43:42,752 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:43:42,752 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:43:42,753 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:43:42,775 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/default/TestLogRolling-testLogRollOnPipelineRestart/b95a13808560ce383c27ab6cb408cb9b/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-18T22:43:42,775 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 2024-11-18T22:43:42,776 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b95a13808560ce383c27ab6cb408cb9b: Waiting for close lock at 1731969822751Running coprocessor pre-close hooks at 1731969822751Disabling compacts and flushes for region at 1731969822751Disabling writes for close at 1731969822751Writing region close event to WAL at 1731969822759 (+8 ms)Running coprocessor post-close hooks at 1731969822775 (+16 ms)Closed at 1731969822775 2024-11-18T22:43:42,776 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731969796832.b95a13808560ce383c27ab6cb408cb9b. 
2024-11-18T22:43:42,779 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T22:43:42,780 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:43:42,780 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:43:42,780 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969822751Running coprocessor pre-close hooks at 1731969822751Disabling compacts and flushes for region at 1731969822751Disabling writes for close at 1731969822752 (+1 ms)Writing region close event to WAL at 1731969822772 (+20 ms)Running coprocessor post-close hooks at 1731969822779 (+7 ms)Closed at 1731969822780 (+1 ms) 2024-11-18T22:43:42,780 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T22:43:42,952 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(976): stopping server 3b0ca91ed64a,34791,1731969794791; all regions closed. 2024-11-18T22:43:42,952 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,955 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,955 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,955 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,955 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:42,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741840_1023 (size=825) 2024-11-18T22:43:42,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741840_1023 (size=825) 2024-11-18T22:43:42,991 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:43,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:43,229 INFO [regionserver/3b0ca91ed64a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T22:43:43,229 INFO [regionserver/3b0ca91ed64a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T22:43:43,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:43,992 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:44,164 INFO [regionserver/3b0ca91ed64a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:43:44,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:44,328 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T22:43:44,706 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:44,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:45,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:45,707 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:45,849 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T22:43:45,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:46,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:46,320 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969796625.meta after 4002ms 2024-11-18T22:43:46,321 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/WALs/3b0ca91ed64a,34791,1731969794791/3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969796625.meta to hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/oldWALs/3b0ca91ed64a%2C34791%2C1731969794791.meta.1731969796625.meta 2024-11-18T22:43:46,330 DEBUG [RS:0;3b0ca91ed64a:34791 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/oldWALs 2024-11-18T22:43:46,330 INFO [RS:0;3b0ca91ed64a:34791 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C34791%2C1731969794791.meta:.meta(num 1731969822297) 2024-11-18T22:43:46,340 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:46,345 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:46,348 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:46,348 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:46,349 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:46,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741844_1028 (size=1162) 2024-11-18T22:43:46,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741844_1028 (size=1162) 2024-11-18T22:43:46,397 DEBUG [RS:0;3b0ca91ed64a:34791 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/oldWALs 2024-11-18T22:43:46,397 INFO [RS:0;3b0ca91ed64a:34791 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C34791%2C1731969794791:(num 1731969822540) 2024-11-18T22:43:46,397 DEBUG [RS:0;3b0ca91ed64a:34791 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:43:46,397 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:43:46,397 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:43:46,397 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.ChoreService(370): Chore service for: regionserver/3b0ca91ed64a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T22:43:46,397 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:43:46,397 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T22:43:46,398 INFO [RS:0;3b0ca91ed64a:34791 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34791 2024-11-18T22:43:46,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:43:46,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3b0ca91ed64a,34791,1731969794791 2024-11-18T22:43:46,563 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:43:46,564 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3b0ca91ed64a,34791,1731969794791] 2024-11-18T22:43:46,592 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3b0ca91ed64a,34791,1731969794791 already deleted, retry=false 2024-11-18T22:43:46,592 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3b0ca91ed64a,34791,1731969794791 expired; onlineServers=0 2024-11-18T22:43:46,592 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3b0ca91ed64a,33747,1731969794364' ***** 2024-11-18T22:43:46,592 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T22:43:46,592 INFO [M:0;3b0ca91ed64a:33747 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:43:46,592 INFO [M:0;3b0ca91ed64a:33747 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:43:46,592 DEBUG [M:0;3b0ca91ed64a:33747 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T22:43:46,592 DEBUG [M:0;3b0ca91ed64a:33747 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T22:43:46,593 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-18T22:43:46,593 INFO [M:0;3b0ca91ed64a:33747 {}] hbase.ChoreService(370): Chore service for: master/3b0ca91ed64a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T22:43:46,593 INFO [M:0;3b0ca91ed64a:33747 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:43:46,593 DEBUG [M:0;3b0ca91ed64a:33747 {}] master.HMaster(1795): Stopping service threads 2024-11-18T22:43:46,593 INFO [M:0;3b0ca91ed64a:33747 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T22:43:46,593 INFO [M:0;3b0ca91ed64a:33747 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:43:46,593 INFO [M:0;3b0ca91ed64a:33747 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T22:43:46,593 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-18T22:43:46,596 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969795992 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969795992,5,FailOnTimeoutGroup] 2024-11-18T22:43:46,596 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969795993 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969795993,5,FailOnTimeoutGroup] 2024-11-18T22:43:46,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T22:43:46,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:46,616 DEBUG [M:0;3b0ca91ed64a:33747 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-18T22:43:46,616 DEBUG [M:0;3b0ca91ed64a:33747 {}] master.ActiveMasterManager(353): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-18T22:43:46,617 INFO [M:0;3b0ca91ed64a:33747 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/.lastflushedseqids 2024-11-18T22:43:46,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741846_1030 (size=120) 2024-11-18T22:43:46,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741846_1030 (size=120) 2024-11-18T22:43:46,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:43:46,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34791-0x101501dbcc70001, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:43:46,681 INFO [RS:0;3b0ca91ed64a:34791 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:43:46,681 INFO [RS:0;3b0ca91ed64a:34791 {}] regionserver.HRegionServer(1031): Exiting; stopping=3b0ca91ed64a,34791,1731969794791; zookeeper connection closed. 2024-11-18T22:43:46,681 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@367bbd1a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@367bbd1a 2024-11-18T22:43:46,684 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T22:43:46,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:46,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:47,036 INFO [M:0;3b0ca91ed64a:33747 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T22:43:47,036 INFO [M:0;3b0ca91ed64a:33747 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T22:43:47,036 DEBUG [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:43:47,036 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:47,036 DEBUG [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:47,036 DEBUG [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:43:47,036 DEBUG [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:47,037 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-18T22:43:47,037 ERROR [FSHLog-0-hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData-prefix:3b0ca91ed64a,33747,1731969794364 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:47,037 WARN [FSHLog-0-hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData-prefix:3b0ca91ed64a,33747,1731969794364 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:47,037 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 3b0ca91ed64a%2C33747%2C1731969794364:(num 1731969795552) roll requested 2024-11-18T22:43:47,037 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C33747%2C1731969794364.1731969827037 2024-11-18T22:43:47,048 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:47,052 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:47,052 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:47,053 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:47,053 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:47,053 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364/3b0ca91ed64a%2C33747%2C1731969794364.1731969827037 2024-11-18T22:43:47,060 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-18T22:43:47,060 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:32993,DS-a27c901c-d30f-4b69-b5bd-974e2eaa7114,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-18T22:43:47,060 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 2024-11-18T22:43:47,060 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45259:45259),(127.0.0.1/127.0.0.1:38759:38759)] 2024-11-18T22:43:47,060 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 is not closed yet, will try archiving it next time 2024-11-18T22:43:47,061 WARN [IPC Server handler 2 on default port 46163 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-18T22:43:47,061 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 after 1ms 2024-11-18T22:43:47,087 DEBUG [M:0;3b0ca91ed64a:33747 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f794fc1bda1d4cd4ab0a6d3a723d6a2d is 82, key is hbase:meta,,1/info:regioninfo/1731969796675/Put/seqid=0 2024-11-18T22:43:47,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741848_1033 (size=5672) 2024-11-18T22:43:47,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741848_1033 (size=5672) 2024-11-18T22:43:47,113 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f794fc1bda1d4cd4ab0a6d3a723d6a2d 2024-11-18T22:43:47,147 DEBUG [M:0;3b0ca91ed64a:33747 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/58821d2f6cd7416a8ea142c5ca5512d7 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731969797681/Put/seqid=0 2024-11-18T22:43:47,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:47,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741849_1034 (size=6118) 2024-11-18T22:43:47,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741849_1034 (size=6118) 2024-11-18T22:43:47,175 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/58821d2f6cd7416a8ea142c5ca5512d7 2024-11-18T22:43:47,203 DEBUG [M:0;3b0ca91ed64a:33747 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6c38669580634383906eefc5639c39d0 is 69, key is 3b0ca91ed64a,34791,1731969794791/rs:state/1731969796109/Put/seqid=0 2024-11-18T22:43:47,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741850_1035 (size=5156) 2024-11-18T22:43:47,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741850_1035 (size=5156) 2024-11-18T22:43:47,214 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6c38669580634383906eefc5639c39d0 2024-11-18T22:43:47,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:43:47,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T22:43:47,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-18T22:43:47,242 DEBUG [M:0;3b0ca91ed64a:33747 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/01eca890dc514af39e8f2e70765f06e2 is 52, key is load_balancer_on/state:d/1731969796825/Put/seqid=0 2024-11-18T22:43:47,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741851_1036 (size=5056) 2024-11-18T22:43:47,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741851_1036 (size=5056) 2024-11-18T22:43:47,251 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/01eca890dc514af39e8f2e70765f06e2 2024-11-18T22:43:47,266 DEBUG [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f794fc1bda1d4cd4ab0a6d3a723d6a2d as hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f794fc1bda1d4cd4ab0a6d3a723d6a2d 2024-11-18T22:43:47,290 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f794fc1bda1d4cd4ab0a6d3a723d6a2d, entries=8, sequenceid=56, filesize=5.5 K 2024-11-18T22:43:47,292 DEBUG [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/58821d2f6cd7416a8ea142c5ca5512d7 as hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/58821d2f6cd7416a8ea142c5ca5512d7 2024-11-18T22:43:47,299 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/58821d2f6cd7416a8ea142c5ca5512d7, entries=6, sequenceid=56, filesize=6.0 K 
2024-11-18T22:43:47,300 DEBUG [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6c38669580634383906eefc5639c39d0 as hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6c38669580634383906eefc5639c39d0 2024-11-18T22:43:47,307 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6c38669580634383906eefc5639c39d0, entries=1, sequenceid=56, filesize=5.0 K 2024-11-18T22:43:47,308 DEBUG [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/01eca890dc514af39e8f2e70765f06e2 as hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/01eca890dc514af39e8f2e70765f06e2 2024-11-18T22:43:47,317 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/01eca890dc514af39e8f2e70765f06e2, entries=1, sequenceid=56, filesize=4.9 K 2024-11-18T22:43:47,320 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 284ms, sequenceid=56, compaction requested=false 2024-11-18T22:43:47,333 INFO [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:47,333 DEBUG [M:0;3b0ca91ed64a:33747 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969827036Disabling compacts and flushes for region at 1731969827036Disabling writes for close at 1731969827036Obtaining lock to block concurrent updates at 1731969827037 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731969827037Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1731969827037Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731969827065 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731969827065Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731969827086 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731969827086Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731969827125 (+39 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731969827147 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731969827147Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731969827185 (+38 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731969827202 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731969827202Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731969827221 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731969827241 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731969827241Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f6535d3: reopening flushed file at 1731969827258 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b0bdb2c: reopening flushed file at 1731969827290 (+32 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25870034: reopening flushed file at 1731969827299 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71b5406d: reopening flushed file at 1731969827307 (+8 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 284ms, sequenceid=56, compaction requested=false at 1731969827320 (+13 ms)Writing region close event to WAL at 1731969827333 (+13 ms)Closed at 1731969827333 2024-11-18T22:43:47,334 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:47,334 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:47,335 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:47,335 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:47,335 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:43:47,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35221 is added to blk_1073741847_1031 (size=757) 2024-11-18T22:43:47,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44141 is added to blk_1073741847_1031 (size=757) 2024-11-18T22:43:47,708 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:47,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:47,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:48,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:48,319 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:43:48,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,359 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,360 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,366 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:43:48,709 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:48,850 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-18T22:43:48,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:49,171 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:49,710 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:49,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:50,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:50,711 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:50,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:51,062 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 after 4002ms 2024-11-18T22:43:51,067 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/WALs/3b0ca91ed64a,33747,1731969794364/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 to hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/oldWALs/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 2024-11-18T22:43:51,078 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/MasterData/oldWALs/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552 to hdfs://localhost:46163/user/jenkins/test-data/dd1f99e6-7f72-b9f7-ed0b-9657c764027a/oldWALs/3b0ca91ed64a%2C33747%2C1731969794364.1731969795552$masterlocalwal$ 2024-11-18T22:43:51,078 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T22:43:51,078 INFO [M:0;3b0ca91ed64a:33747 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T22:43:51,078 INFO [M:0;3b0ca91ed64a:33747 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33747 2024-11-18T22:43:51,078 INFO [M:0;3b0ca91ed64a:33747 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:43:51,172 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:51,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:43:51,270 INFO [M:0;3b0ca91ed64a:33747 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:43:51,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33747-0x101501dbcc70000, quorum=127.0.0.1:54626, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:43:51,301 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4e65fed6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:51,302 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e0f5c1e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:43:51,302 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:43:51,308 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5295748e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:43:51,309 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6edb8d2f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,STOPPED} 2024-11-18T22:43:51,336 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:43:51,336 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:43:51,336 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-943418849-172.17.0.2-1731969791211 (Datanode Uuid 0fe48884-7c43-4857-b711-5a2fa75e1401) service to localhost/127.0.0.1:46163 2024-11-18T22:43:51,336 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:43:51,337 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data3/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:51,338 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:43:51,338 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data4/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:51,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5efc28c8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:51,362 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75d818fe{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:43:51,362 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:43:51,362 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65accf8e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:43:51,362 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@727fa385{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,STOPPED} 2024-11-18T22:43:51,373 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:43:51,373 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:43:51,373 WARN [BP-943418849-172.17.0.2-1731969791211 heartbeating to localhost/127.0.0.1:46163 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-943418849-172.17.0.2-1731969791211 (Datanode Uuid f82ec9c5-3348-4999-82aa-fb9b16116667) service to localhost/127.0.0.1:46163 2024-11-18T22:43:51,373 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:43:51,374 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data1/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:51,374 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/cluster_43b4729f-c3e3-e4e6-4e36-b4a5b35a5e02/data/data2/current/BP-943418849-172.17.0.2-1731969791211 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:43:51,374 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:43:51,413 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4658354a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:43:51,428 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2bdc2a53{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:43:51,429 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:43:51,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58a42b34{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:43:51,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@265efa03{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir/,STOPPED} 2024-11-18T22:43:51,474 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T22:43:51,511 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T22:43:51,529 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 151) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46163 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46163 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46163 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:46163 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:46163 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46163 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:46163 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:46163 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 436) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=960 (was 1062), ProcessCount=11 (was 11), AvailableMemoryMB=6784 (was 6782) - AvailableMemoryMB LEAK? 
- 2024-11-18T22:43:51,542 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=960, ProcessCount=11, AvailableMemoryMB=6780 2024-11-18T22:43:51,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T22:43:51,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.log.dir so I do NOT create it in target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2 2024-11-18T22:43:51,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/40d5e0b1-22d2-05f8-9e89-5975d3c885d2/hadoop.tmp.dir so I do NOT create it in target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2 2024-11-18T22:43:51,543 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58, deleteOnExit=true 2024-11-18T22:43:51,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T22:43:51,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/test.cache.data in system properties and HBase conf 2024-11-18T22:43:51,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T22:43:51,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/hadoop.log.dir in system properties and HBase conf 2024-11-18T22:43:51,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T22:43:51,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T22:43:51,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T22:43:51,544 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-18T22:43:51,544 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:43:51,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:43:51,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T22:43:51,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:43:51,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T22:43:51,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T22:43:51,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:43:51,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:43:51,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T22:43:51,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/nfs.dump.dir in system properties and HBase conf 2024-11-18T22:43:51,545 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/java.io.tmpdir in system properties and HBase conf 2024-11-18T22:43:51,546 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:43:51,546 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T22:43:51,546 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T22:43:51,565 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:43:51,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:51,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:52,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:52,350 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:52,360 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:43:52,376 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:43:52,376 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:43:52,376 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:43:52,382 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:52,386 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dae72b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:43:52,386 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2898a7f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:43:52,564 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@97a3b39{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/java.io.tmpdir/jetty-localhost-35001-hadoop-hdfs-3_4_1-tests_jar-_-any-3426432003459247894/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:43:52,572 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14cf53f6{HTTP/1.1, (http/1.1)}{localhost:35001} 2024-11-18T22:43:52,572 INFO [Time-limited test {}] server.Server(415): Started @226372ms 2024-11-18T22:43:52,589 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:43:52,712 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:52,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:53,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:53,199 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:53,213 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:43:53,233 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:43:53,233 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:43:53,233 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:43:53,237 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65618dbb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:43:53,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58200445{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:43:53,423 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@dac4784{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/java.io.tmpdir/jetty-localhost-35709-hadoop-hdfs-3_4_1-tests_jar-_-any-1327830045853626222/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:53,428 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f8c1b5d{HTTP/1.1, (http/1.1)}{localhost:35709} 2024-11-18T22:43:53,428 INFO [Time-limited test {}] server.Server(415): Started @227228ms 2024-11-18T22:43:53,430 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:43:53,608 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:43:53,616 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:43:53,627 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:43:53,627 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:43:53,627 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:43:53,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@30b562f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:43:53,629 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5559c5a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:43:53,716 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:53,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a860b8d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/java.io.tmpdir/jetty-localhost-45075-hadoop-hdfs-3_4_1-tests_jar-_-any-977017479649574093/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:43:53,785 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c200310{HTTP/1.1, (http/1.1)}{localhost:45075} 2024-11-18T22:43:53,785 INFO [Time-limited test {}] server.Server(415): Started @227585ms 2024-11-18T22:43:53,787 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:43:54,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:54,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:54,612 WARN [Thread-1676 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58/data/data2/current/BP-41218721-172.17.0.2-1731969831579/current, will proceed with Du for space computation calculation, 2024-11-18T22:43:54,613 WARN [Thread-1675 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58/data/data1/current/BP-41218721-172.17.0.2-1731969831579/current, will proceed with Du for space computation calculation, 2024-11-18T22:43:54,654 WARN [Thread-1640 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:43:54,660 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x433bf384f1d03c28 with lease ID 0xc87fee21be00410d: Processing first storage report for DS-52bfe490-83e7-43a7-a42e-8619d257bd0b from datanode DatanodeRegistration(127.0.0.1:43211, datanodeUuid=ae5d9546-f9dd-41e6-b147-bd70c34f98ee, infoPort=44773, infoSecurePort=0, ipcPort=42263, storageInfo=lv=-57;cid=testClusterID;nsid=1127651795;c=1731969831579) 2024-11-18T22:43:54,660 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x433bf384f1d03c28 with lease ID 0xc87fee21be00410d: from storage DS-52bfe490-83e7-43a7-a42e-8619d257bd0b node DatanodeRegistration(127.0.0.1:43211, datanodeUuid=ae5d9546-f9dd-41e6-b147-bd70c34f98ee, infoPort=44773, infoSecurePort=0, ipcPort=42263, storageInfo=lv=-57;cid=testClusterID;nsid=1127651795;c=1731969831579), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:54,660 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x433bf384f1d03c28 with lease ID 0xc87fee21be00410d: Processing first storage report for DS-6aeffde0-8509-4d1b-8991-2e274f184be4 from datanode DatanodeRegistration(127.0.0.1:43211, datanodeUuid=ae5d9546-f9dd-41e6-b147-bd70c34f98ee, infoPort=44773, infoSecurePort=0, ipcPort=42263, storageInfo=lv=-57;cid=testClusterID;nsid=1127651795;c=1731969831579) 2024-11-18T22:43:54,660 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x433bf384f1d03c28 with lease ID 0xc87fee21be00410d: from storage DS-6aeffde0-8509-4d1b-8991-2e274f184be4 node DatanodeRegistration(127.0.0.1:43211, datanodeUuid=ae5d9546-f9dd-41e6-b147-bd70c34f98ee, infoPort=44773, infoSecurePort=0, ipcPort=42263, storageInfo=lv=-57;cid=testClusterID;nsid=1127651795;c=1731969831579), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:54,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:54,717 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 after 68098ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor209.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
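For context on the repeated util.RecoverLeaseFSUtils(258) and (233) warnings above: the WAL close path keeps retrying HDFS lease recovery while the underlying DFSClient has already been shut down, so every recoverLease/isFileClosed call fails with "Filesystem closed". The following is a minimal, hypothetical Java sketch of the recover-then-poll pattern visible in those stack frames. It is not HBase's actual RecoverLeaseFSUtils code; the class name, method signature and timeout parameters are invented for illustration, and only the public DistributedFileSystem.recoverLease(Path) and isFileClosed(Path) APIs that appear in the traces are used.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {

      /**
       * Ask the NameNode to recover the lease on a WAL file, then poll until
       * the file is reported closed or the timeout expires.
       * Returns true if the lease was recovered within the timeout.
       */
      public static boolean recoverLease(DistributedFileSystem dfs, Path wal,
          long timeoutMs, long pauseMs) throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        // First attempt: recoverLease() returns true if the file is already closed.
        if (dfs.recoverLease(wal)) {
          return true;
        }
        while (System.currentTimeMillis() < deadline) {
          Thread.sleep(pauseMs);
          // isFileClosed() throws "Filesystem closed" if the DFSClient has been
          // shut down, which is the failure recorded in the log above; in that
          // case retrying cannot succeed and the exception should surface.
          if (dfs.isFileClosed(wal)) {
            return true;
          }
          // Re-issue recoverLease() periodically; the NameNode may need several
          // attempts while it waits out the previous writer's lease.
          if (dfs.recoverLease(wal)) {
            return true;
          }
        }
        return false;
      }
    }

In this log the retries can never complete because the filesystem instance backing the WAL was closed before lease recovery finished, so each attempt (attempt=2 after 68098ms above, and later attempts below) fails the same way until the test's minicluster is torn down.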
2024-11-18T22:43:54,942 WARN [Thread-1687 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58/data/data3/current/BP-41218721-172.17.0.2-1731969831579/current, will proceed with Du for space computation calculation, 2024-11-18T22:43:54,948 WARN [Thread-1688 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58/data/data4/current/BP-41218721-172.17.0.2-1731969831579/current, will proceed with Du for space computation calculation, 2024-11-18T22:43:54,997 WARN [Thread-1663 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T22:43:55,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:55,004 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3e08c9aad55a11bf with lease ID 0xc87fee21be00410e: Processing first storage report for DS-42ec161c-d205-476e-8db5-9a86352fb10c from datanode DatanodeRegistration(127.0.0.1:38609, datanodeUuid=2f0b7112-0e90-49df-8dcf-68ef96070da1, infoPort=37945, infoSecurePort=0, ipcPort=43045, storageInfo=lv=-57;cid=testClusterID;nsid=1127651795;c=1731969831579) 2024-11-18T22:43:55,004 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3e08c9aad55a11bf with lease ID 0xc87fee21be00410e: from storage DS-42ec161c-d205-476e-8db5-9a86352fb10c node DatanodeRegistration(127.0.0.1:38609, datanodeUuid=2f0b7112-0e90-49df-8dcf-68ef96070da1, infoPort=37945, infoSecurePort=0, ipcPort=43045, storageInfo=lv=-57;cid=testClusterID;nsid=1127651795;c=1731969831579), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:55,004 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3e08c9aad55a11bf with lease ID 0xc87fee21be00410e: Processing first storage report for DS-f1d8b6a6-b2bf-4c29-bc8a-8d2a97659c47 from datanode DatanodeRegistration(127.0.0.1:38609, datanodeUuid=2f0b7112-0e90-49df-8dcf-68ef96070da1, infoPort=37945, infoSecurePort=0, ipcPort=43045, storageInfo=lv=-57;cid=testClusterID;nsid=1127651795;c=1731969831579) 2024-11-18T22:43:55,004 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3e08c9aad55a11bf with lease ID 0xc87fee21be00410e: from storage DS-f1d8b6a6-b2bf-4c29-bc8a-8d2a97659c47 node DatanodeRegistration(127.0.0.1:38609, datanodeUuid=2f0b7112-0e90-49df-8dcf-68ef96070da1, infoPort=37945, infoSecurePort=0, ipcPort=43045, storageInfo=lv=-57;cid=testClusterID;nsid=1127651795;c=1731969831579), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:43:55,052 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2 2024-11-18T22:43:55,073 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58/zookeeper_0, clientPort=57150, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T22:43:55,081 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57150 2024-11-18T22:43:55,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:55,083 INFO [Time-limited 
test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:55,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:43:55,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:43:55,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:55,504 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d with version=8 2024-11-18T22:43:55,504 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/hbase-staging 2024-11-18T22:43:55,506 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:43:55,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:55,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:55,507 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:43:55,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:55,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:43:55,507 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T22:43:55,507 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:43:55,508 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38543 2024-11-18T22:43:55,510 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38543 connecting to ZooKeeper ensemble=127.0.0.1:57150 2024-11-18T22:43:55,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:385430x0, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:43:55,589 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38543-0x101501e5bde0000 connected 2024-11-18T22:43:55,718 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:55,992 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:55,994 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:55,996 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:43:55,997 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d, hbase.cluster.distributed=false 2024-11-18T22:43:55,999 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:43:56,000 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38543 2024-11-18T22:43:56,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:56,004 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38543 2024-11-18T22:43:56,004 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38543 2024-11-18T22:43:56,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38543 2024-11-18T22:43:56,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38543 2024-11-18T22:43:56,027 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:43:56,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:56,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:56,027 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:43:56,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:43:56,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:43:56,027 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T22:43:56,027 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:43:56,029 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36431 2024-11-18T22:43:56,031 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36431 connecting to ZooKeeper ensemble=127.0.0.1:57150 2024-11-18T22:43:56,032 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:56,034 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:56,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:56,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364310x0, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:43:56,188 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:364310x0, quorum=127.0.0.1:57150, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:43:56,188 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T22:43:56,192 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36431-0x101501e5bde0001 connected 2024-11-18T22:43:56,193 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T22:43:56,194 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T22:43:56,195 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:43:56,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36431 2024-11-18T22:43:56,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36431 2024-11-18T22:43:56,199 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36431 2024-11-18T22:43:56,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36431 2024-11-18T22:43:56,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36431 2024-11-18T22:43:56,220 DEBUG [M:0;3b0ca91ed64a:38543 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3b0ca91ed64a:38543 2024-11-18T22:43:56,224 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3b0ca91ed64a,38543,1731969835506 2024-11-18T22:43:56,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:43:56,344 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:43:56,347 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3b0ca91ed64a,38543,1731969835506 2024-11-18T22:43:56,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T22:43:56,566 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:56,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:56,566 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T22:43:56,567 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3b0ca91ed64a,38543,1731969835506 from backup master directory 2024-11-18T22:43:56,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3b0ca91ed64a,38543,1731969835506 2024-11-18T22:43:56,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:43:56,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:43:56,622 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-18T22:43:56,622 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3b0ca91ed64a,38543,1731969835506 2024-11-18T22:43:56,627 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/hbase.id] with ID: 97b46833-c0c5-46e3-9b2e-8043f85a1975 2024-11-18T22:43:56,627 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/.tmp/hbase.id 2024-11-18T22:43:56,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:43:56,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:43:56,634 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/.tmp/hbase.id]:[hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/hbase.id] 2024-11-18T22:43:56,647 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:56,647 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T22:43:56,648 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-18T22:43:56,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:56,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:56,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:56,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:43:56,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:43:56,740 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:43:56,741 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T22:43:56,741 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:43:56,771 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:43:56,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:43:57,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:57,172 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store 2024-11-18T22:43:57,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:57,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:43:57,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:43:57,188 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:57,188 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:43:57,188 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:57,188 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:57,188 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:43:57,188 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:43:57,188 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T22:43:57,188 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969837188Disabling compacts and flushes for region at 1731969837188Disabling writes for close at 1731969837188Writing region close event to WAL at 1731969837188Closed at 1731969837188 2024-11-18T22:43:57,190 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/.initializing 2024-11-18T22:43:57,190 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/WALs/3b0ca91ed64a,38543,1731969835506 2024-11-18T22:43:57,193 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C38543%2C1731969835506, suffix=, logDir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/WALs/3b0ca91ed64a,38543,1731969835506, archiveDir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/oldWALs, maxLogs=10 2024-11-18T22:43:57,194 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C38543%2C1731969835506.1731969837194 2024-11-18T22:43:57,212 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/WALs/3b0ca91ed64a,38543,1731969835506/3b0ca91ed64a%2C38543%2C1731969835506.1731969837194 2024-11-18T22:43:57,216 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37945:37945),(127.0.0.1/127.0.0.1:44773:44773)] 2024-11-18T22:43:57,217 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:43:57,218 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:57,218 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,218 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,225 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,227 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T22:43:57,227 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:57,228 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:57,228 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,230 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T22:43:57,230 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:57,233 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:43:57,233 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T22:43:57,241 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T22:43:57,241 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:57,244 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:43:57,244 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,246 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T22:43:57,246 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:57,249 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:43:57,249 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,250 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,251 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,253 DEBUG 
[master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,253 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,254 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T22:43:57,255 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:43:57,257 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:43:57,258 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863085, jitterRate=0.09747026860713959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T22:43:57,259 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731969837218Initializing all the Stores at 1731969837224 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969837225 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969837225Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969837225Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969837225Cleaning up temporary data from old regions at 1731969837253 (+28 ms)Region opened successfully at 1731969837259 (+6 ms) 2024-11-18T22:43:57,259 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T22:43:57,264 DEBUG 
[master/3b0ca91ed64a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fa604e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:43:57,268 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T22:43:57,268 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T22:43:57,269 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T22:43:57,269 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T22:43:57,270 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-18T22:43:57,271 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T22:43:57,271 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T22:43:57,282 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T22:43:57,283 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T22:43:57,296 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T22:43:57,297 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T22:43:57,300 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T22:43:57,320 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T22:43:57,321 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T22:43:57,322 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T22:43:57,336 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T22:43:57,343 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T22:43:57,355 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T22:43:57,358 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T22:43:57,365 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T22:43:57,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:43:57,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:43:57,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:57,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:57,376 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3b0ca91ed64a,38543,1731969835506, sessionid=0x101501e5bde0000, setting cluster-up flag (Was=false) 2024-11-18T22:43:57,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:57,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:57,439 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T22:43:57,448 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,38543,1731969835506 2024-11-18T22:43:57,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:57,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:57,502 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): 
Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T22:43:57,503 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,38543,1731969835506 2024-11-18T22:43:57,504 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T22:43:57,506 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(746): ClusterId : 97b46833-c0c5-46e3-9b2e-8043f85a1975 2024-11-18T22:43:57,506 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T22:43:57,506 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T22:43:57,507 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T22:43:57,507 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
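The StochasticLoadBalancer entry just above lists its cost functions and prints "sum of multiplier of cost functions = 0.0": the balancer's score is a weighted sum, one configurable multiplier per cost function, so with every multiplier at zero the combined cost comes out as zero. A minimal, self-contained sketch of that weighted-sum shape (illustrative names only, not the HBase classes):

    // Illustrative only: a weighted sum of normalized cost terms, the shape of the
    // score described by the balancer entry above.
    final class WeightedCostSketch {
        /** Each cost is assumed pre-normalized to [0,1]; multipliers are the per-function weights. */
        static double score(double[] multipliers, double[] costs) {
            double total = 0.0;
            for (int i = 0; i < multipliers.length; i++) {
                total += multipliers[i] * costs[i];
            }
            return total; // all multipliers 0.0 -> total 0.0, as logged
        }
    }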
2024-11-18T22:43:57,507 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3b0ca91ed64a,38543,1731969835506 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T22:43:57,513 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T22:43:57,513 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T22:43:57,513 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:43:57,513 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:43:57,513 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:43:57,514 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:43:57,514 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3b0ca91ed64a:0, corePoolSize=10, maxPoolSize=10 2024-11-18T22:43:57,514 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,514 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:43:57,514 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,517 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:43:57,518 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T22:43:57,519 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:57,519 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T22:43:57,523 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731969867523 2024-11-18T22:43:57,523 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T22:43:57,523 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T22:43:57,523 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T22:43:57,523 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T22:43:57,523 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T22:43:57,523 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T22:43:57,523 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
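The hbase:meta table descriptor printed above spells out each column family's attributes (VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER, IN_MEMORY, BLOCKSIZE, and so on). For reference, roughly the same attributes can be expressed through the public client-side builders; a sketch assuming the TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API and mirroring the 'info' family above:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    final class DescriptorSketch {
        // A family configured like the 'info' family printed above:
        // VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        static TableDescriptor example() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8192)
                    .build())
                .build();
        }
    }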
2024-11-18T22:43:57,524 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T22:43:57,524 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T22:43:57,525 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T22:43:57,526 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T22:43:57,526 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T22:43:57,532 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969837526,5,FailOnTimeoutGroup] 2024-11-18T22:43:57,534 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T22:43:57,535 DEBUG [RS:0;3b0ca91ed64a:36431 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64b6844d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:43:57,540 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969837532,5,FailOnTimeoutGroup] 2024-11-18T22:43:57,540 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:57,540 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T22:43:57,540 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:57,540 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
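The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entries record fixed-period background tasks (log cleaning, HFile cleaning, snapshot cleanup, and so on). A minimal standalone sketch of that pattern using a plain ScheduledExecutorService, not the HBase ChoreService itself:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    final class ChoreSketch {
        // Runs `task` every `periodMs` milliseconds on a named thread, the same shape
        // as the "ScheduledChore name=..., period=..., unit=MILLISECONDS" entries above.
        static ScheduledExecutorService schedule(String name, Runnable task, long periodMs) {
            ScheduledExecutorService pool =
                Executors.newSingleThreadScheduledExecutor(r -> new Thread(r, name));
            pool.scheduleAtFixedRate(task, periodMs, periodMs, TimeUnit.MILLISECONDS);
            return pool;
        }
    }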
2024-11-18T22:43:57,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:43:57,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:43:57,551 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T22:43:57,552 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d 2024-11-18T22:43:57,554 DEBUG [RS:0;3b0ca91ed64a:36431 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3b0ca91ed64a:36431 2024-11-18T22:43:57,554 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T22:43:57,554 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T22:43:57,554 DEBUG [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(832): About to register with Master. 
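Several ZKUtil(444) entries earlier in this section note "Unable to get data of znode ... because node does not exist (not necessarily an error)": the master probes optional znodes such as /hbase/balancer and /hbase/normalizer and falls back to defaults when they are absent. A minimal sketch of that read-or-null pattern with the plain ZooKeeper client (illustrative, not the ZKUtil code itself):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    final class ZNodeReadSketch {
        // Reads a znode's data, treating a missing node as "no value" rather than an error,
        // which is the situation the "not necessarily an error" messages above record.
        static byte[] getDataOrNull(ZooKeeper zk, String path)
                throws KeeperException, InterruptedException {
            try {
                return zk.getData(path, false, null);
            } catch (KeeperException.NoNodeException absent) {
                return null; // znode not created yet; caller uses its default
            }
        }
    }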
2024-11-18T22:43:57,555 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(2659): reportForDuty to master=3b0ca91ed64a,38543,1731969835506 with port=36431, startcode=1731969836026 2024-11-18T22:43:57,555 DEBUG [RS:0;3b0ca91ed64a:36431 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T22:43:57,560 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45473, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T22:43:57,562 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38543 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3b0ca91ed64a,36431,1731969836026 2024-11-18T22:43:57,562 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38543 {}] master.ServerManager(517): Registering regionserver=3b0ca91ed64a,36431,1731969836026 2024-11-18T22:43:57,564 DEBUG [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d 2024-11-18T22:43:57,564 DEBUG [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41707 2024-11-18T22:43:57,564 DEBUG [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T22:43:57,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:43:57,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:43:57,569 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:57,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:43:57,572 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:43:57,572 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:57,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:43:57,576 DEBUG [RS:0;3b0ca91ed64a:36431 {}] zookeeper.ZKUtil(111): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3b0ca91ed64a,36431,1731969836026 2024-11-18T22:43:57,576 WARN [RS:0;3b0ca91ed64a:36431 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T22:43:57,577 INFO [RS:0;3b0ca91ed64a:36431 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:43:57,577 DEBUG [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026 2024-11-18T22:43:57,577 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:57,578 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:43:57,581 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3b0ca91ed64a,36431,1731969836026] 2024-11-18T22:43:57,584 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:43:57,584 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:57,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:57,585 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:43:57,586 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T22:43:57,586 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:43:57,587 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:57,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:57,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:43:57,588 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T22:43:57,588 INFO [RS:0;3b0ca91ed64a:36431 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T22:43:57,588 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:57,588 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T22:43:57,592 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T22:43:57,593 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
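The CompactionConfiguration entries above print a 1.2 ratio and a 3–10 file window for every column family. Read loosely, the ratio rule keeps a candidate file out of a minor compaction when it is much larger than the files it would be merged with; a simplified sketch of that size check, under that loose reading rather than the exact ExploringCompactionPolicy logic:

    final class RatioCheckSketch {
        // Simplified reading of the ratio printed above: a file of size `candidate`
        // joins a selection whose other members total `sumOfOthers` only if
        // candidate <= ratio * sumOfOthers (ratio = 1.2 in this run).
        static boolean fitsRatio(long candidate, long sumOfOthers, double ratio) {
            return candidate <= ratio * sumOfOthers;
        }
    }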
2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:43:57,593 DEBUG [RS:0;3b0ca91ed64a:36431 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:43:57,594 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 
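The executor-service entries above all start pools with equal core and maximum sizes (RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1; RS_SNAPSHOT_OPERATIONS with 3/3; and so on), so each service is effectively a fixed-size pool. A minimal sketch of that configuration with java.util.concurrent; letting idle core threads time out mirrors the allowCoreThreadTimeOut=true noted earlier for the RemoteProcedureDispatcher and is only an assumption for these pools:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    final class ExecutorSketch {
        // A pool with equal core and max sizes, like the "corePoolSize=N, maxPoolSize=N"
        // executor services started above; idle core threads are allowed to time out
        // so an unused service holds no threads.
        static ThreadPoolExecutor fixedPool(int size) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                size, size, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            pool.allowCoreThreadTimeOut(true);
            return pool;
        }
    }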
2024-11-18T22:43:57,594 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:57,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:57,595 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:43:57,595 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740 2024-11-18T22:43:57,595 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:57,596 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:57,596 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:57,596 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:57,596 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:57,596 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,36431,1731969836026-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:43:57,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740 2024-11-18T22:43:57,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:43:57,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:43:57,598 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
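The FlushLargeStoresPolicy lines in this section state their fallback rule directly: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, the per-family lower bound is the region's memstore flush size divided by its number of column families. That matches the numbers logged earlier for the master's local store (flushSize=134217728 over the four families info/proc/rs/state gives 33554432, i.e. 32 MB), and the same division produces the 16 MB figure logged here for hbase:meta's four families (info, ns, rep_barrier, table). A one-line sketch of the computation:

    final class FlushLowerBoundSketch {
        // Fallback rule printed by FlushLargeStoresPolicy above:
        // lowerBound = memstoreFlushSize / numberOfColumnFamilies
        static long lowerBound(long memstoreFlushSize, int families) {
            return memstoreFlushSize / families; // 134217728 / 4 = 33554432 (32 MB)
        }
    }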
2024-11-18T22:43:57,600 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:43:57,606 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:43:57,607 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=733568, jitterRate=-0.06722098588943481}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:43:57,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731969837569Initializing all the Stores at 1731969837570 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969837570Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969837570Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969837570Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969837570Cleaning up temporary data from old regions at 1731969837598 (+28 ms)Region opened successfully at 1731969837608 (+10 ms) 2024-11-18T22:43:57,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:43:57,608 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:43:57,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:43:57,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:43:57,608 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:43:57,617 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T22:43:57,618 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,36431,1731969836026-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T22:43:57,618 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:57,618 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.Replication(171): 3b0ca91ed64a,36431,1731969836026 started 2024-11-18T22:43:57,620 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:43:57,620 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969837608Disabling compacts and flushes for region at 1731969837608Disabling writes for close at 1731969837608Writing region close event to WAL at 1731969837620 (+12 ms)Closed at 1731969837620 2024-11-18T22:43:57,627 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:43:57,628 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T22:43:57,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T22:43:57,632 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:43:57,634 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T22:43:57,646 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T22:43:57,647 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(1482): Serving as 3b0ca91ed64a,36431,1731969836026, RpcServer on 3b0ca91ed64a/172.17.0.2:36431, sessionid=0x101501e5bde0001 2024-11-18T22:43:57,647 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T22:43:57,647 DEBUG [RS:0;3b0ca91ed64a:36431 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3b0ca91ed64a,36431,1731969836026 2024-11-18T22:43:57,647 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,36431,1731969836026' 2024-11-18T22:43:57,647 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T22:43:57,648 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T22:43:57,649 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T22:43:57,649 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T22:43:57,649 DEBUG [RS:0;3b0ca91ed64a:36431 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3b0ca91ed64a,36431,1731969836026 2024-11-18T22:43:57,649 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,36431,1731969836026' 2024-11-18T22:43:57,649 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T22:43:57,649 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T22:43:57,650 DEBUG [RS:0;3b0ca91ed64a:36431 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T22:43:57,650 INFO [RS:0;3b0ca91ed64a:36431 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T22:43:57,650 INFO [RS:0;3b0ca91ed64a:36431 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T22:43:57,719 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:57,752 INFO [RS:0;3b0ca91ed64a:36431 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C36431%2C1731969836026, suffix=, logDir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026, archiveDir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/oldWALs, maxLogs=32 2024-11-18T22:43:57,753 INFO [RS:0;3b0ca91ed64a:36431 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C36431%2C1731969836026.1731969837752 2024-11-18T22:43:57,785 INFO [RS:0;3b0ca91ed64a:36431 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026/3b0ca91ed64a%2C36431%2C1731969836026.1731969837752 2024-11-18T22:43:57,786 WARN [3b0ca91ed64a:38543 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T22:43:57,793 DEBUG [RS:0;3b0ca91ed64a:36431 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37945:37945),(127.0.0.1/127.0.0.1:44773:44773)] 2024-11-18T22:43:58,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:58,036 DEBUG [3b0ca91ed64a:38543 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T22:43:58,037 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3b0ca91ed64a,36431,1731969836026 2024-11-18T22:43:58,041 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,36431,1731969836026, state=OPENING 2024-11-18T22:43:58,052 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T22:43:58,067 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:43:58,067 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,36431,1731969836026}] 2024-11-18T22:43:58,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:58,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:43:58,078 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: 
CHANGED 2024-11-18T22:43:58,078 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:43:58,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:43:58,232 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T22:43:58,235 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39999, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T22:43:58,245 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T22:43:58,245 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:43:58,247 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C36431%2C1731969836026.meta, suffix=.meta, logDir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026, archiveDir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/oldWALs, maxLogs=32 2024-11-18T22:43:58,247 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C36431%2C1731969836026.meta.1731969838247.meta 2024-11-18T22:43:58,280 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026/3b0ca91ed64a%2C36431%2C1731969836026.meta.1731969838247.meta 2024-11-18T22:43:58,320 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37945:37945),(127.0.0.1/127.0.0.1:44773:44773)] 2024-11-18T22:43:58,336 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:43:58,337 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T22:43:58,337 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T22:43:58,337 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
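The RecoverLeaseFSUtils warnings above report "InvocationTargetException: null" with the real failure ("Filesystem closed") as the Caused by. That is the normal shape of a reflective call: Method.invoke wraps whatever the target method throws, and the caller recovers the original failure from getCause(). A minimal standalone sketch of that pattern (hypothetical call, not the HBase helper itself):

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    final class ReflectiveCallSketch {
        // Any exception thrown by the reflectively invoked method comes back wrapped in
        // InvocationTargetException; getCause() recovers the original failure, e.g. the
        // "IOException: Filesystem closed" seen in the stack traces above.
        static Object invokeUnwrapped(Object target, Method method, Object... args) throws Exception {
            try {
                return method.invoke(target, args);
            } catch (InvocationTargetException wrapped) {
                Throwable cause = wrapped.getCause();
                throw cause instanceof Exception ? (Exception) cause : wrapped;
            }
        }
    }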
2024-11-18T22:43:58,337 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T22:43:58,337 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:58,337 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T22:43:58,337 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T22:43:58,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:43:58,341 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:43:58,341 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:58,344 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:58,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:43:58,346 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:43:58,346 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:58,346 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:58,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:43:58,348 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:43:58,348 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:58,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:43:58,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:43:58,351 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:43:58,351 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:58,351 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-18T22:43:58,352 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:43:58,353 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740 2024-11-18T22:43:58,355 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740 2024-11-18T22:43:58,356 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:43:58,357 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:43:58,357 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T22:43:58,359 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:43:58,361 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=856483, jitterRate=0.08907462656497955}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:43:58,361 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T22:43:58,362 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731969838337Writing region info on filesystem at 1731969838337Initializing all the Stores at 1731969838338 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969838338Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969838338Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969838339 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969838339Cleaning up temporary data from old regions at 1731969838357 (+18 ms)Running coprocessor post-open hooks at 1731969838361 (+4 ms)Region opened successfully at 1731969838362 (+1 ms) 2024-11-18T22:43:58,363 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731969838232 2024-11-18T22:43:58,377 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T22:43:58,377 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T22:43:58,378 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3b0ca91ed64a,36431,1731969836026 2024-11-18T22:43:58,379 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,36431,1731969836026, state=OPEN 2024-11-18T22:43:58,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:43:58,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:43:58,476 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:43:58,476 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,36431,1731969836026 2024-11-18T22:43:58,476 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:43:58,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T22:43:58,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,36431,1731969836026 in 409 msec 2024-11-18T22:43:58,483 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T22:43:58,483 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 851 msec 2024-11-18T22:43:58,484 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:43:58,484 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T22:43:58,485 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:43:58,486 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,36431,1731969836026, seqNum=-1] 2024-11-18T22:43:58,486 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:43:58,488 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38995, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:43:58,495 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 987 msec 2024-11-18T22:43:58,495 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731969838495, completionTime=-1 2024-11-18T22:43:58,495 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T22:43:58,495 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T22:43:58,498 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T22:43:58,498 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731969898498 2024-11-18T22:43:58,498 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731969958498 2024-11-18T22:43:58,498 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T22:43:58,498 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38543,1731969835506-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:58,498 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38543,1731969835506-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:58,498 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38543,1731969835506-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:58,498 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3b0ca91ed64a:38543, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T22:43:58,498 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:58,500 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T22:43:58,501 DEBUG [master/3b0ca91ed64a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T22:43:58,505 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.883sec 2024-11-18T22:43:58,505 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T22:43:58,505 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T22:43:58,506 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T22:43:58,506 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T22:43:58,506 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T22:43:58,506 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38543,1731969835506-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:43:58,506 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38543,1731969835506-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T22:43:58,508 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c19f51f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:43:58,509 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3b0ca91ed64a,38543,-1 for getting cluster id 2024-11-18T22:43:58,509 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T22:43:58,510 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T22:43:58,511 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T22:43:58,511 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,38543,1731969835506-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T22:43:58,515 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '97b46833-c0c5-46e3-9b2e-8043f85a1975' 2024-11-18T22:43:58,516 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T22:43:58,516 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "97b46833-c0c5-46e3-9b2e-8043f85a1975" 2024-11-18T22:43:58,516 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33b3b7d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:43:58,516 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3b0ca91ed64a,38543,-1] 2024-11-18T22:43:58,516 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T22:43:58,517 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:43:58,518 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53446, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T22:43:58,519 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bd2985a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:43:58,519 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:43:58,520 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,36431,1731969836026, seqNum=-1] 2024-11-18T22:43:58,521 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:43:58,522 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39898, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:43:58,524 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3b0ca91ed64a,38543,1731969835506 2024-11-18T22:43:58,524 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:43:58,527 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T22:43:58,527 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T22:43:58,528 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 3b0ca91ed64a,38543,1731969835506 2024-11-18T22:43:58,528 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4c184da8 2024-11-18T22:43:58,528 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T22:43:58,529 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53458, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T22:43:58,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T22:43:58,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-18T22:43:58,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:43:58,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T22:43:58,533 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T22:43:58,533 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:58,533 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-18T22:43:58,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T22:43:58,535 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T22:43:58,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741835_1011 (size=405) 2024-11-18T22:43:58,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741835_1011 (size=405) 2024-11-18T22:43:58,545 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 
a30ad6c0b1f51ea4b39d55ee78968df3, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d 2024-11-18T22:43:58,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741836_1012 (size=88) 2024-11-18T22:43:58,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741836_1012 (size=88) 2024-11-18T22:43:58,556 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:58,556 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing a30ad6c0b1f51ea4b39d55ee78968df3, disabling compactions & flushes 2024-11-18T22:43:58,556 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:43:58,556 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:43:58,556 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. after waiting 0 ms 2024-11-18T22:43:58,556 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:43:58,556 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 
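The HMaster$4(2454) entry earlier in this stretch shows the client request that created 'TestLogRolling-testCompactionRecordDoesntBlockRolling' with a single 'info' family, and the TableDescriptorChecker warnings flag the deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192). A rough sketch of an equivalent request through the public Admin API follows; the table name, family name and the two limits are taken from the log, while the surrounding harness (connection setup, class name) is assumed for illustration and is not the actual test code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          // Single 'info' family with default settings, matching the descriptor in the log.
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          // Tiny limits copied from the log; small enough to trigger the
          // TableDescriptorChecker warnings about over-splitting and frequent flushes.
          .setMaxFileSize(786432L)
          .setMemStoreFlushSize(8192L)
          .build();
      admin.createTable(td); // Corresponds to the CreateTableProcedure (pid=4) traced above.
    }
  }
}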
2024-11-18T22:43:58,556 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for a30ad6c0b1f51ea4b39d55ee78968df3: Waiting for close lock at 1731969838556Disabling compacts and flushes for region at 1731969838556Disabling writes for close at 1731969838556Writing region close event to WAL at 1731969838556Closed at 1731969838556 2024-11-18T22:43:58,558 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T22:43:58,558 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731969838558"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731969838558"}]},"ts":"1731969838558"} 2024-11-18T22:43:58,561 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-18T22:43:58,562 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T22:43:58,562 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731969838562"}]},"ts":"1731969838562"} 2024-11-18T22:43:58,565 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-18T22:43:58,565 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=a30ad6c0b1f51ea4b39d55ee78968df3, ASSIGN}] 2024-11-18T22:43:58,567 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=a30ad6c0b1f51ea4b39d55ee78968df3, ASSIGN 2024-11-18T22:43:58,568 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=a30ad6c0b1f51ea4b39d55ee78968df3, ASSIGN; state=OFFLINE, location=3b0ca91ed64a,36431,1731969836026; forceNewPlan=false, retain=false 2024-11-18T22:43:58,719 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a30ad6c0b1f51ea4b39d55ee78968df3, regionState=OPENING, regionLocation=3b0ca91ed64a,36431,1731969836026 2024-11-18T22:43:58,720 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:58,722 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=a30ad6c0b1f51ea4b39d55ee78968df3, ASSIGN because future has completed 2024-11-18T22:43:58,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a30ad6c0b1f51ea4b39d55ee78968df3, server=3b0ca91ed64a,36431,1731969836026}] 2024-11-18T22:43:58,881 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 
2024-11-18T22:43:58,881 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a30ad6c0b1f51ea4b39d55ee78968df3, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:43:58,882 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:43:58,882 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:43:58,882 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:43:58,882 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:43:58,886 INFO [StoreOpener-a30ad6c0b1f51ea4b39d55ee78968df3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:43:58,888 INFO [StoreOpener-a30ad6c0b1f51ea4b39d55ee78968df3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a30ad6c0b1f51ea4b39d55ee78968df3 columnFamilyName info 2024-11-18T22:43:58,888 DEBUG [StoreOpener-a30ad6c0b1f51ea4b39d55ee78968df3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:43:58,888 INFO [StoreOpener-a30ad6c0b1f51ea4b39d55ee78968df3-1 {}] regionserver.HStore(327): Store=a30ad6c0b1f51ea4b39d55ee78968df3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:43:58,888 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:43:58,889 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:43:58,890 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:43:58,891 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:43:58,891 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:43:58,893 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:43:58,897 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:43:58,897 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a30ad6c0b1f51ea4b39d55ee78968df3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=789316, jitterRate=0.0036681443452835083}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T22:43:58,897 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:43:58,898 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a30ad6c0b1f51ea4b39d55ee78968df3: Running coprocessor pre-open hook at 1731969838882Writing region info on filesystem at 1731969838882Initializing all the Stores at 1731969838886 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969838886Cleaning up temporary data from old regions at 1731969838891 (+5 ms)Running coprocessor post-open hooks at 1731969838897 (+6 ms)Region opened successfully at 1731969838898 (+1 ms) 2024-11-18T22:43:58,899 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3., pid=6, masterSystemTime=1731969838877 2024-11-18T22:43:58,902 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:43:58,902 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:43:58,903 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a30ad6c0b1f51ea4b39d55ee78968df3, regionState=OPEN, openSeqNum=2, regionLocation=3b0ca91ed64a,36431,1731969836026 2024-11-18T22:43:58,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a30ad6c0b1f51ea4b39d55ee78968df3, server=3b0ca91ed64a,36431,1731969836026 because future has completed 2024-11-18T22:43:58,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T22:43:58,918 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a30ad6c0b1f51ea4b39d55ee78968df3, server=3b0ca91ed64a,36431,1731969836026 in 192 msec 2024-11-18T22:43:58,921 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T22:43:58,921 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=a30ad6c0b1f51ea4b39d55ee78968df3, ASSIGN in 353 msec 2024-11-18T22:43:58,922 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T22:43:58,923 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731969838922"}]},"ts":"1731969838922"} 2024-11-18T22:43:58,925 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-18T22:43:58,927 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T22:43:58,929 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 397 msec 2024-11-18T22:43:59,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:59,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:43:59,721 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:00,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:00,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:00,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:01,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:01,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:01,722 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:02,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:02,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:02,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,723 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:02,773 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:44:02,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,804 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,810 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,810 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,811 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:02,814 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:03,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:03,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:03,586 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T22:44:03,587 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-18T22:44:03,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:04,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:04,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:04,724 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:05,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:05,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:05,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:06,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:06,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 after 68072ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor209.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:44:06,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:06,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta after 68099ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor209.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-18T22:44:06,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:07,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:07,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:07,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T22:44:07,236 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-18T22:44:07,237 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:44:07,237 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-18T22:44:07,237 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T22:44:07,237 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-18T22:44:07,238 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T22:44:07,238 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T22:44:07,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:08,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:08,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T22:44:08,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-18T22:44:08,638 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-18T22:44:08,638 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-18T22:44:08,643 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T22:44:08,643 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.
2024-11-18T22:44:08,648 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3., hostname=3b0ca91ed64a,36431,1731969836026, seqNum=2]
2024-11-18T22:44:08,668 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T22:44:08,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T22:44:08,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-18T22:44:08,680 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-18T22:44:08,683 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-18T22:44:08,685 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-18T22:44:08,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T22:44:08,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36431 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-18T22:44:08,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.
2024-11-18T22:44:08,868 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing a30ad6c0b1f51ea4b39d55ee78968df3 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-18T22:44:08,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/dd75b777f3774b79a96696910b018a6e is 1080, key is row0001/info:/1731969848650/Put/seqid=0
2024-11-18T22:44:08,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741837_1013 (size=6033)
2024-11-18T22:44:08,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741837_1013 (size=6033)
2024-11-18T22:44:08,938 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/dd75b777f3774b79a96696910b018a6e
2024-11-18T22:44:08,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/dd75b777f3774b79a96696910b018a6e as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/dd75b777f3774b79a96696910b018a6e
2024-11-18T22:44:08,967 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/dd75b777f3774b79a96696910b018a6e, entries=1, sequenceid=5, filesize=5.9 K
2024-11-18T22:44:08,969 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for a30ad6c0b1f51ea4b39d55ee78968df3 in 100ms, sequenceid=5, compaction requested=false
2024-11-18T22:44:08,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for a30ad6c0b1f51ea4b39d55ee78968df3:
2024-11-18T22:44:08,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.
2024-11-18T22:44:08,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-18T22:44:08,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-18T22:44:08,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-18T22:44:08,982 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 289 msec
2024-11-18T22:44:08,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 313 msec
2024-11-18T22:44:09,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:09,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:09,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:10,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:10,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:10,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:11,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:11,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:11,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:12,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:12,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:12,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:13,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:13,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:13,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:14,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:14,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:14,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:15,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:15,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:15,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:16,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:16,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:16,784 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:17,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:17,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:17,785 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:18,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:18,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T22:44:18,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-18T22:44:18,694 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-18T22:44:18,700 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T22:44:18,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-18T22:44:18,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-18T22:44:18,704 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-18T22:44:18,706 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-18T22:44:18,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-18T22:44:18,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-18T22:44:18,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36431 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-18T22:44:18,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.
2024-11-18T22:44:18,861 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing a30ad6c0b1f51ea4b39d55ee78968df3 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-18T22:44:18,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/a171d5a682d64312bd8da55cd6c47048 is 1080, key is row0002/info:/1731969858696/Put/seqid=0
2024-11-18T22:44:18,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741838_1014 (size=6033)
2024-11-18T22:44:18,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741838_1014 (size=6033)
2024-11-18T22:44:19,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:19,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T22:44:19,275 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/a171d5a682d64312bd8da55cd6c47048
2024-11-18T22:44:19,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/a171d5a682d64312bd8da55cd6c47048 as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/a171d5a682d64312bd8da55cd6c47048
2024-11-18T22:44:19,291 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/a171d5a682d64312bd8da55cd6c47048, entries=1, sequenceid=9, filesize=5.9 K
2024-11-18T22:44:19,293 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for a30ad6c0b1f51ea4b39d55ee78968df3 in 432ms, sequenceid=9, compaction requested=false
2024-11-18T22:44:19,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for a30ad6c0b1f51ea4b39d55ee78968df3:
2024-11-18T22:44:19,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.
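[Editor's note] The flush sequence recorded above (FlushTableProcedure pid=9 with subprocedure FlushRegionProcedure pid=10) was requested by the test client ("Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling"). For reference, a minimal Java sketch of how such an admin-initiated flush is typically triggered through the HBase client API follows; the class name, configuration handling, and connection setup are illustrative assumptions and are not taken from this log or the test source.

// Minimal sketch (assumption): requesting a table flush via the HBase Admin API.
// On the master this request is executed as a FlushTableProcedure with one
// FlushRegionProcedure per region, as in the log entries above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Requests a flush of all regions of the table; each region's memstore is
      // written out as a new store file, as seen in the DefaultStoreFlusher entries.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}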
2024-11-18T22:44:19,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-18T22:44:19,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-18T22:44:19,298 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-18T22:44:19,298 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 589 msec
2024-11-18T22:44:19,300 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 597 msec
2024-11-18T22:44:19,786 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-11-18T22:44:20,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:20,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:20,788 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:21,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:21,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:21,789 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:22,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:22,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:22,790 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:23,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:23,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:23,790 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:24,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:24,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:24,791 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:25,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:25,052 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T22:44:25,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:25,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:26,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:26,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:26,792 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:27,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:27,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:27,793 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:28,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:28,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:28,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-18T22:44:28,733 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T22:44:28,737 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C36431%2C1731969836026.1731969868737 2024-11-18T22:44:28,761 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:28,764 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:28,764 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:28,765 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:28,765 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:28,765 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026/3b0ca91ed64a%2C36431%2C1731969836026.1731969837752 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026/3b0ca91ed64a%2C36431%2C1731969836026.1731969868737 2024-11-18T22:44:28,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741833_1009 (size=5546) 2024-11-18T22:44:28,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741833_1009 (size=5546) 2024-11-18T22:44:28,788 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with 
pipeline: [(127.0.0.1/127.0.0.1:37945:37945),(127.0.0.1/127.0.0.1:44773:44773)] 2024-11-18T22:44:28,792 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T22:44:28,794 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:28,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T22:44:28,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-18T22:44:28,809 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T22:44:28,817 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T22:44:28,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T22:44:28,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36431 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-18T22:44:28,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:44:28,975 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing a30ad6c0b1f51ea4b39d55ee78968df3 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T22:44:28,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/1359a9d95147436a90db42892922bbfe is 1080, key is row0003/info:/1731969868735/Put/seqid=0 2024-11-18T22:44:29,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741840_1016 (size=6033) 2024-11-18T22:44:29,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741840_1016 (size=6033) 2024-11-18T22:44:29,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:29,032 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/1359a9d95147436a90db42892922bbfe 2024-11-18T22:44:29,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/1359a9d95147436a90db42892922bbfe as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/1359a9d95147436a90db42892922bbfe 2024-11-18T22:44:29,097 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/1359a9d95147436a90db42892922bbfe, entries=1, sequenceid=13, filesize=5.9 K 2024-11-18T22:44:29,099 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for a30ad6c0b1f51ea4b39d55ee78968df3 in 124ms, sequenceid=13, compaction requested=true 
2024-11-18T22:44:29,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for a30ad6c0b1f51ea4b39d55ee78968df3: 2024-11-18T22:44:29,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:44:29,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-18T22:44:29,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-18T22:44:29,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-18T22:44:29,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 287 msec 2024-11-18T22:44:29,116 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 321 msec 2024-11-18T22:44:29,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-18T22:44:29,795 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
2024-11-18T22:44:30,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
2024-11-18T22:44:30,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta
2024-11-18T22:44:30,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
2024-11-18T22:44:31,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
2024-11-18T22:44:31,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta
2024-11-18T22:44:31,796 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
2024-11-18T22:44:32,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
2024-11-18T22:44:32,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta
2024-11-18T22:44:32,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
2024-11-18T22:44:33,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
2024-11-18T22:44:33,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta
2024-11-18T22:44:33,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
2024-11-18T22:44:34,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
2024-11-18T22:44:34,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta
2024-11-18T22:44:34,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
2024-11-18T22:44:35,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
2024-11-18T22:44:35,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta
2024-11-18T22:44:35,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
2024-11-18T22:44:36,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
2024-11-18T22:44:36,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta
2024-11-18T22:44:36,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
2024-11-18T22:44:37,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
2024-11-18T22:44:37,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta
2024-11-18T22:44:37,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
2024-11-18T22:44:38,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723
2024-11-18T22:44:38,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta
2024-11-18T22:44:38,512 INFO [master/3b0ca91ed64a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-18T22:44:38,513 INFO [master/3b0ca91ed64a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-18T22:44:38,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-18T22:44:38,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-18T22:44:38,894 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T22:44:38,894 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:44:38,896 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:44:38,896 DEBUG [Time-limited test {}] regionserver.HStore(1541): a30ad6c0b1f51ea4b39d55ee78968df3/info is initiating minor compaction (all files) 2024-11-18T22:44:38,896 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T22:44:38,896 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:38,897 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of a30ad6c0b1f51ea4b39d55ee78968df3/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:44:38,897 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/dd75b777f3774b79a96696910b018a6e, hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/a171d5a682d64312bd8da55cd6c47048, hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/1359a9d95147436a90db42892922bbfe] into tmpdir=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp, totalSize=17.7 K 2024-11-18T22:44:38,897 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting dd75b777f3774b79a96696910b018a6e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731969848650 2024-11-18T22:44:38,898 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting a171d5a682d64312bd8da55cd6c47048, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731969858696 2024-11-18T22:44:38,898 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1359a9d95147436a90db42892922bbfe, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731969868735 2024-11-18T22:44:38,953 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): a30ad6c0b1f51ea4b39d55ee78968df3#info#compaction#47 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:44:38,953 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/050b0d72f2754697872909c0d5940514 is 1080, key is row0001/info:/1731969848650/Put/seqid=0 2024-11-18T22:44:38,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741841_1017 (size=8296) 2024-11-18T22:44:38,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741841_1017 (size=8296) 2024-11-18T22:44:39,038 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:39,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:39,404 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/050b0d72f2754697872909c0d5940514 as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/050b0d72f2754697872909c0d5940514 2024-11-18T22:44:39,413 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a30ad6c0b1f51ea4b39d55ee78968df3/info of a30ad6c0b1f51ea4b39d55ee78968df3 into 050b0d72f2754697872909c0d5940514(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-18T22:44:39,414 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for a30ad6c0b1f51ea4b39d55ee78968df3: 2024-11-18T22:44:39,417 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C36431%2C1731969836026.1731969879417 2024-11-18T22:44:39,426 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:39,426 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:39,426 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:39,426 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:39,426 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:39,426 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026/3b0ca91ed64a%2C36431%2C1731969836026.1731969868737 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026/3b0ca91ed64a%2C36431%2C1731969836026.1731969879417 2024-11-18T22:44:39,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741839_1015 (size=2520) 2024-11-18T22:44:39,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741839_1015 (size=2520) 2024-11-18T22:44:39,436 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44773:44773),(127.0.0.1/127.0.0.1:37945:37945)] 2024-11-18T22:44:39,436 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026/3b0ca91ed64a%2C36431%2C1731969836026.1731969837752 to hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/oldWALs/3b0ca91ed64a%2C36431%2C1731969836026.1731969837752 2024-11-18T22:44:39,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T22:44:39,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T22:44:39,444 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-18T22:44:39,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-18T22:44:39,445 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-18T22:44:39,446 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-18T22:44:39,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36431 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-18T22:44:39,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:44:39,602 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing a30ad6c0b1f51ea4b39d55ee78968df3 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T22:44:39,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/a2c1929eec4c495db5591a2ca256af5e is 1080, key is row0000/info:/1731969879415/Put/seqid=0 2024-11-18T22:44:39,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741843_1019 (size=6033) 2024-11-18T22:44:39,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741843_1019 (size=6033) 2024-11-18T22:44:39,627 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/a2c1929eec4c495db5591a2ca256af5e 2024-11-18T22:44:39,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/a2c1929eec4c495db5591a2ca256af5e as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/a2c1929eec4c495db5591a2ca256af5e 2024-11-18T22:44:39,646 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/a2c1929eec4c495db5591a2ca256af5e, entries=1, sequenceid=18, filesize=5.9 K 2024-11-18T22:44:39,647 INFO [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for a30ad6c0b1f51ea4b39d55ee78968df3 in 46ms, sequenceid=18, compaction requested=false 2024-11-18T22:44:39,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 
{event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for a30ad6c0b1f51ea4b39d55ee78968df3: 2024-11-18T22:44:39,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:44:39,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-18T22:44:39,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-18T22:44:39,655 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-18T22:44:39,655 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 204 msec 2024-11-18T22:44:39,659 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 215 msec 2024-11-18T22:44:39,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:40,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:40,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:40,803 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:41,039 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:41,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:41,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:42,040 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:42,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:42,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:43,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:43,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:43,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:43,882 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a30ad6c0b1f51ea4b39d55ee78968df3, had cached 0 bytes from a total of 14329 2024-11-18T22:44:44,041 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:44,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:44,806 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:45,042 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:45,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:45,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:46,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:46,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:46,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:47,043 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:47,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:47,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:48,044 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:48,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:48,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:49,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:49,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:49,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-18T22:44:49,544 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-18T22:44:49,548 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C36431%2C1731969836026.1731969889548 2024-11-18T22:44:49,565 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:49,565 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:49,565 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:49,565 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:49,565 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:49,565 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026/3b0ca91ed64a%2C36431%2C1731969836026.1731969879417 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026/3b0ca91ed64a%2C36431%2C1731969836026.1731969889548 2024-11-18T22:44:49,567 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37945:37945),(127.0.0.1/127.0.0.1:44773:44773)] 2024-11-18T22:44:49,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741842_1018 (size=2026) 2024-11-18T22:44:49,567 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026/3b0ca91ed64a%2C36431%2C1731969836026.1731969879417 is not closed yet, will try archiving it next time 2024-11-18T22:44:49,567 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T22:44:49,567 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/WALs/3b0ca91ed64a,36431,1731969836026/3b0ca91ed64a%2C36431%2C1731969836026.1731969868737 to hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/oldWALs/3b0ca91ed64a%2C36431%2C1731969836026.1731969868737 2024-11-18T22:44:49,567 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T22:44:49,568 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:44:49,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:44:49,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:44:49,568 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T22:44:49,568 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1700543223, stopped=false 2024-11-18T22:44:49,568 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3b0ca91ed64a,38543,1731969835506 2024-11-18T22:44:49,568 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-18T22:44:49,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741842_1018 (size=2026) 2024-11-18T22:44:49,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:44:49,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:44:49,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:49,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:49,630 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:44:49,630 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T22:44:49,630 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:44:49,630 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:44:49,631 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '3b0ca91ed64a,36431,1731969836026' ***** 2024-11-18T22:44:49,631 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T22:44:49,631 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T22:44:49,631 INFO [RS:0;3b0ca91ed64a:36431 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T22:44:49,631 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T22:44:49,631 INFO [RS:0;3b0ca91ed64a:36431 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T22:44:49,632 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(3091): Received CLOSE for a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:44:49,632 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:44:49,632 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:44:49,632 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(959): stopping server 3b0ca91ed64a,36431,1731969836026 2024-11-18T22:44:49,632 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:44:49,632 INFO [RS:0;3b0ca91ed64a:36431 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3b0ca91ed64a:36431. 2024-11-18T22:44:49,632 DEBUG [RS:0;3b0ca91ed64a:36431 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:44:49,632 DEBUG [RS:0;3b0ca91ed64a:36431 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:44:49,632 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T22:44:49,632 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-18T22:44:49,632 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T22:44:49,633 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T22:44:49,633 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-18T22:44:49,633 DEBUG [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(1325): Online Regions={a30ad6c0b1f51ea4b39d55ee78968df3=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3., 1588230740=hbase:meta,,1.1588230740} 2024-11-18T22:44:49,633 DEBUG [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a30ad6c0b1f51ea4b39d55ee78968df3 2024-11-18T22:44:49,633 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a30ad6c0b1f51ea4b39d55ee78968df3, disabling compactions & flushes 2024-11-18T22:44:49,633 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:44:49,633 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:44:49,633 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. after waiting 0 ms 2024-11-18T22:44:49,633 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:44:49,633 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 
2024-11-18T22:44:49,633 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:44:49,633 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:44:49,633 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:44:49,633 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:44:49,633 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a30ad6c0b1f51ea4b39d55ee78968df3 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T22:44:49,633 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-18T22:44:49,638 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/3c55bc1c76c94eeba6d8624b7c04f678 is 1080, key is row0001/info:/1731969889546/Put/seqid=0 2024-11-18T22:44:49,653 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/.tmp/info/8b53c0777a294086a321919691b28f78 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3./info:regioninfo/1731969838903/Put/seqid=0 2024-11-18T22:44:49,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741845_1021 (size=6033) 2024-11-18T22:44:49,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741845_1021 (size=6033) 2024-11-18T22:44:49,656 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/3c55bc1c76c94eeba6d8624b7c04f678 2024-11-18T22:44:49,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741846_1022 (size=7308) 2024-11-18T22:44:49,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741846_1022 (size=7308) 2024-11-18T22:44:49,663 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/.tmp/info/8b53c0777a294086a321919691b28f78 2024-11-18T22:44:49,664 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/.tmp/info/3c55bc1c76c94eeba6d8624b7c04f678 as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/3c55bc1c76c94eeba6d8624b7c04f678 2024-11-18T22:44:49,672 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/3c55bc1c76c94eeba6d8624b7c04f678, entries=1, sequenceid=22, filesize=5.9 K 2024-11-18T22:44:49,673 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for a30ad6c0b1f51ea4b39d55ee78968df3 in 40ms, sequenceid=22, compaction requested=true 2024-11-18T22:44:49,676 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/dd75b777f3774b79a96696910b018a6e, hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/a171d5a682d64312bd8da55cd6c47048, hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/1359a9d95147436a90db42892922bbfe] to archive 2024-11-18T22:44:49,677 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-18T22:44:49,682 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/dd75b777f3774b79a96696910b018a6e to hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/dd75b777f3774b79a96696910b018a6e 2024-11-18T22:44:49,684 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/a171d5a682d64312bd8da55cd6c47048 to hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/a171d5a682d64312bd8da55cd6c47048 2024-11-18T22:44:49,686 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/1359a9d95147436a90db42892922bbfe to hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/info/1359a9d95147436a90db42892922bbfe 2024-11-18T22:44:49,687 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3b0ca91ed64a:38543 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-18T22:44:49,687 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [dd75b777f3774b79a96696910b018a6e=6033, a171d5a682d64312bd8da55cd6c47048=6033, 1359a9d95147436a90db42892922bbfe=6033] 2024-11-18T22:44:49,701 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/a30ad6c0b1f51ea4b39d55ee78968df3/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-18T22:44:49,702 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 2024-11-18T22:44:49,702 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a30ad6c0b1f51ea4b39d55ee78968df3: Waiting for close lock at 1731969889633Running coprocessor pre-close hooks at 1731969889633Disabling compacts and flushes for region at 1731969889633Disabling writes for close at 1731969889633Obtaining lock to block concurrent updates at 1731969889633Preparing flush snapshotting stores in a30ad6c0b1f51ea4b39d55ee78968df3 at 1731969889633Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731969889634 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. at 1731969889635 (+1 ms)Flushing a30ad6c0b1f51ea4b39d55ee78968df3/info: creating writer at 1731969889635Flushing a30ad6c0b1f51ea4b39d55ee78968df3/info: appending metadata at 1731969889638 (+3 ms)Flushing a30ad6c0b1f51ea4b39d55ee78968df3/info: closing flushed file at 1731969889638Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6397c76b: reopening flushed file at 1731969889663 (+25 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for a30ad6c0b1f51ea4b39d55ee78968df3 in 40ms, sequenceid=22, compaction requested=true at 1731969889673 (+10 ms)Writing region close event to WAL at 1731969889696 (+23 ms)Running coprocessor post-close hooks at 1731969889702 (+6 ms)Closed at 1731969889702 2024-11-18T22:44:49,702 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731969838530.a30ad6c0b1f51ea4b39d55ee78968df3. 
2024-11-18T22:44:49,706 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/.tmp/ns/24dd23ed4ed244668b6ee7de64253665 is 43, key is default/ns:d/1731969838488/Put/seqid=0 2024-11-18T22:44:49,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741847_1023 (size=5153) 2024-11-18T22:44:49,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741847_1023 (size=5153) 2024-11-18T22:44:49,809 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:49,833 DEBUG [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:44:50,033 DEBUG [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:44:50,045 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:50,120 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/.tmp/ns/24dd23ed4ed244668b6ee7de64253665 2024-11-18T22:44:50,147 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/.tmp/table/01e428089f094bf7859029eadea57a5c is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731969838922/Put/seqid=0 2024-11-18T22:44:50,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741848_1024 (size=5508) 2024-11-18T22:44:50,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741848_1024 (size=5508) 2024-11-18T22:44:50,165 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/.tmp/table/01e428089f094bf7859029eadea57a5c 2024-11-18T22:44:50,173 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/.tmp/info/8b53c0777a294086a321919691b28f78 as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/info/8b53c0777a294086a321919691b28f78 2024-11-18T22:44:50,179 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/info/8b53c0777a294086a321919691b28f78, entries=10, sequenceid=11, filesize=7.1 K 2024-11-18T22:44:50,181 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/.tmp/ns/24dd23ed4ed244668b6ee7de64253665 as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/ns/24dd23ed4ed244668b6ee7de64253665 2024-11-18T22:44:50,188 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/ns/24dd23ed4ed244668b6ee7de64253665, entries=2, sequenceid=11, filesize=5.0 K 2024-11-18T22:44:50,189 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/.tmp/table/01e428089f094bf7859029eadea57a5c as 
hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/table/01e428089f094bf7859029eadea57a5c 2024-11-18T22:44:50,195 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/table/01e428089f094bf7859029eadea57a5c, entries=2, sequenceid=11, filesize=5.4 K 2024-11-18T22:44:50,196 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 563ms, sequenceid=11, compaction requested=false 2024-11-18T22:44:50,216 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-18T22:44:50,216 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:44:50,217 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:44:50,217 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969889633Running coprocessor pre-close hooks at 1731969889633Disabling compacts and flushes for region at 1731969889633Disabling writes for close at 1731969889633Obtaining lock to block concurrent updates at 1731969889633Preparing flush snapshotting stores in 1588230740 at 1731969889633Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731969889634 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731969889635 (+1 ms)Flushing 1588230740/info: creating writer at 1731969889635Flushing 1588230740/info: appending metadata at 1731969889652 (+17 ms)Flushing 1588230740/info: closing flushed file at 1731969889652Flushing 1588230740/ns: creating writer at 1731969889670 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731969889706 (+36 ms)Flushing 1588230740/ns: closing flushed file at 1731969889706Flushing 1588230740/table: creating writer at 1731969890127 (+421 ms)Flushing 1588230740/table: appending metadata at 1731969890146 (+19 ms)Flushing 1588230740/table: closing flushed file at 1731969890146Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@378ab4ed: reopening flushed file at 1731969890171 (+25 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@577bd589: reopening flushed file at 1731969890180 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a2dc5d5: reopening flushed file at 1731969890188 (+8 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 563ms, sequenceid=11, compaction requested=false at 1731969890196 (+8 ms)Writing region close event to WAL at 1731969890212 (+16 ms)Running coprocessor post-close hooks at 1731969890216 (+4 ms)Closed at 1731969890217 (+1 ms) 2024-11-18T22:44:50,217 DEBUG 
[RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T22:44:50,233 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(976): stopping server 3b0ca91ed64a,36431,1731969836026; all regions closed. 2024-11-18T22:44:50,234 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:50,234 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:50,234 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:50,234 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:50,235 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:50,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741834_1010 (size=3306) 2024-11-18T22:44:50,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:50,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741834_1010 (size=3306) 2024-11-18T22:44:50,240 DEBUG [RS:0;3b0ca91ed64a:36431 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/oldWALs 2024-11-18T22:44:50,240 INFO [RS:0;3b0ca91ed64a:36431 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C36431%2C1731969836026.meta:.meta(num 1731969838247) 2024-11-18T22:44:50,241 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:50,241 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:50,241 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:50,241 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:50,242 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:50,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741844_1020 (size=1252) 2024-11-18T22:44:50,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741844_1020 (size=1252) 2024-11-18T22:44:50,247 DEBUG [RS:0;3b0ca91ed64a:36431 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/oldWALs 2024-11-18T22:44:50,247 INFO [RS:0;3b0ca91ed64a:36431 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C36431%2C1731969836026:(num 1731969889548) 2024-11-18T22:44:50,247 DEBUG [RS:0;3b0ca91ed64a:36431 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:44:50,247 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:44:50,251 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:44:50,251 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.ChoreService(370): Chore service for: regionserver/3b0ca91ed64a:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-18T22:44:50,251 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:44:50,251 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-18T22:44:50,252 INFO [RS:0;3b0ca91ed64a:36431 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36431 2024-11-18T22:44:50,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3b0ca91ed64a,36431,1731969836026 2024-11-18T22:44:50,277 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:44:50,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:44:50,330 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3b0ca91ed64a,36431,1731969836026] 2024-11-18T22:44:50,353 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3b0ca91ed64a,36431,1731969836026 already deleted, retry=false 2024-11-18T22:44:50,353 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3b0ca91ed64a,36431,1731969836026 expired; onlineServers=0 2024-11-18T22:44:50,353 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3b0ca91ed64a,38543,1731969835506' ***** 2024-11-18T22:44:50,353 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T22:44:50,353 INFO [M:0;3b0ca91ed64a:38543 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:44:50,353 INFO [M:0;3b0ca91ed64a:38543 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:44:50,353 DEBUG [M:0;3b0ca91ed64a:38543 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T22:44:50,354 DEBUG [M:0;3b0ca91ed64a:38543 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T22:44:50,354 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969837526 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969837526,5,FailOnTimeoutGroup] 2024-11-18T22:44:50,354 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T22:44:50,354 INFO [M:0;3b0ca91ed64a:38543 {}] hbase.ChoreService(370): Chore service for: master/3b0ca91ed64a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T22:44:50,354 INFO [M:0;3b0ca91ed64a:38543 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:44:50,354 DEBUG [M:0;3b0ca91ed64a:38543 {}] master.HMaster(1795): Stopping service threads 2024-11-18T22:44:50,354 INFO [M:0;3b0ca91ed64a:38543 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T22:44:50,354 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969837532 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969837532,5,FailOnTimeoutGroup] 2024-11-18T22:44:50,354 INFO [M:0;3b0ca91ed64a:38543 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:44:50,355 INFO [M:0;3b0ca91ed64a:38543 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T22:44:50,355 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T22:44:50,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T22:44:50,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:50,373 DEBUG [M:0;3b0ca91ed64a:38543 {}] zookeeper.ZKUtil(347): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T22:44:50,373 WARN [M:0;3b0ca91ed64a:38543 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T22:44:50,374 INFO [M:0;3b0ca91ed64a:38543 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/.lastflushedseqids 2024-11-18T22:44:50,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741849_1025 (size=130) 2024-11-18T22:44:50,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741849_1025 (size=130) 2024-11-18T22:44:50,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:44:50,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36431-0x101501e5bde0001, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:44:50,430 INFO [RS:0;3b0ca91ed64a:36431 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:44:50,431 INFO [RS:0;3b0ca91ed64a:36431 {}] regionserver.HRegionServer(1031): Exiting; stopping=3b0ca91ed64a,36431,1731969836026; zookeeper connection closed. 
2024-11-18T22:44:50,431 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@44fb63a1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@44fb63a1 2024-11-18T22:44:50,431 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T22:44:50,795 INFO [M:0;3b0ca91ed64a:38543 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T22:44:50,795 INFO [M:0;3b0ca91ed64a:38543 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T22:44:50,795 DEBUG [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:44:50,795 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:44:50,795 DEBUG [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:44:50,796 DEBUG [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:44:50,796 DEBUG [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:44:50,796 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.58 KB heapSize=54.99 KB 2024-11-18T22:44:50,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:50,813 DEBUG [M:0;3b0ca91ed64a:38543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/999794d713b2470bada17e96f17991bc is 82, key is hbase:meta,,1/info:regioninfo/1731969838378/Put/seqid=0 2024-11-18T22:44:50,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741850_1026 (size=5672) 2024-11-18T22:44:50,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741850_1026 (size=5672) 2024-11-18T22:44:50,818 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/999794d713b2470bada17e96f17991bc 2024-11-18T22:44:50,840 DEBUG [M:0;3b0ca91ed64a:38543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8b0fdf058a0f4a0b8152c556e5377f2c is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731969838928/Put/seqid=0 2024-11-18T22:44:50,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741851_1027 (size=7822) 2024-11-18T22:44:50,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741851_1027 (size=7822) 2024-11-18T22:44:50,848 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.98 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8b0fdf058a0f4a0b8152c556e5377f2c 2024-11-18T22:44:50,858 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8b0fdf058a0f4a0b8152c556e5377f2c 2024-11-18T22:44:50,881 DEBUG [M:0;3b0ca91ed64a:38543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c35f37f26b2c43f7a921ac24180a216c is 69, key is 3b0ca91ed64a,36431,1731969836026/rs:state/1731969837563/Put/seqid=0 2024-11-18T22:44:50,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741852_1028 (size=5156) 2024-11-18T22:44:50,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43211 is added to blk_1073741852_1028 (size=5156) 2024-11-18T22:44:51,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:51,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:51,292 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c35f37f26b2c43f7a921ac24180a216c 2024-11-18T22:44:51,312 DEBUG [M:0;3b0ca91ed64a:38543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d0a3396a4918473495f62b10810f8318 is 52, key is load_balancer_on/state:d/1731969838526/Put/seqid=0 2024-11-18T22:44:51,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741853_1029 (size=5056) 2024-11-18T22:44:51,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741853_1029 (size=5056) 2024-11-18T22:44:51,317 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d0a3396a4918473495f62b10810f8318 2024-11-18T22:44:51,321 DEBUG [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/999794d713b2470bada17e96f17991bc as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/999794d713b2470bada17e96f17991bc 2024-11-18T22:44:51,327 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/999794d713b2470bada17e96f17991bc, entries=8, sequenceid=121, filesize=5.5 K 2024-11-18T22:44:51,328 DEBUG [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8b0fdf058a0f4a0b8152c556e5377f2c as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8b0fdf058a0f4a0b8152c556e5377f2c 2024-11-18T22:44:51,336 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8b0fdf058a0f4a0b8152c556e5377f2c 2024-11-18T22:44:51,336 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8b0fdf058a0f4a0b8152c556e5377f2c, entries=14, sequenceid=121, filesize=7.6 K 2024-11-18T22:44:51,337 DEBUG [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c35f37f26b2c43f7a921ac24180a216c as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c35f37f26b2c43f7a921ac24180a216c 2024-11-18T22:44:51,343 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c35f37f26b2c43f7a921ac24180a216c, entries=1, sequenceid=121, filesize=5.0 K 2024-11-18T22:44:51,344 DEBUG [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d0a3396a4918473495f62b10810f8318 as hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d0a3396a4918473495f62b10810f8318 2024-11-18T22:44:51,349 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41707/user/jenkins/test-data/41edc1fa-4e01-ef56-6590-c6fcd37e854d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d0a3396a4918473495f62b10810f8318, entries=1, sequenceid=121, filesize=4.9 K 2024-11-18T22:44:51,350 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 554ms, sequenceid=121, compaction requested=false 2024-11-18T22:44:51,352 INFO [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T22:44:51,352 DEBUG [M:0;3b0ca91ed64a:38543 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969890795Disabling compacts and flushes for region at 1731969890795Disabling writes for close at 1731969890796 (+1 ms)Obtaining lock to block concurrent updates at 1731969890796Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731969890796Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44629, getHeapSize=56248, getOffHeapSize=0, getCellsCount=140 at 1731969890796Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731969890797 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731969890797Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731969890812 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731969890812Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731969890823 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731969890840 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731969890840Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731969890859 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731969890880 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731969890880Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731969891298 (+418 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731969891312 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731969891312Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61b686ac: reopening flushed file at 1731969891321 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9e5e563: reopening flushed file at 1731969891327 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8f184a0: reopening flushed file at 1731969891336 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5dd0d396: reopening flushed file at 1731969891343 (+7 ms)Finished flush of dataSize ~43.58 KB/44629, heapSize ~54.93 KB/56248, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 554ms, sequenceid=121, compaction requested=false at 1731969891350 (+7 ms)Writing region close event to WAL at 1731969891352 (+2 ms)Closed at 1731969891352 2024-11-18T22:44:51,356 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:51,356 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:51,356 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:51,357 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:51,357 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:44:51,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43211 is added to blk_1073741830_1006 (size=53026) 2024-11-18T22:44:51,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38609 is added to blk_1073741830_1006 (size=53026) 2024-11-18T22:44:51,617 INFO [regionserver/3b0ca91ed64a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:44:51,761 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): 
LogRoller exiting. 2024-11-18T22:44:51,761 INFO [M:0;3b0ca91ed64a:38543 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T22:44:51,761 INFO [M:0;3b0ca91ed64a:38543 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38543 2024-11-18T22:44:51,762 INFO [M:0;3b0ca91ed64a:38543 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:44:51,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:51,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:44:51,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38543-0x101501e5bde0000, quorum=127.0.0.1:57150, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:44:51,873 INFO [M:0;3b0ca91ed64a:38543 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:44:51,876 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a860b8d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:44:51,877 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c200310{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:44:51,877 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:44:51,877 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5559c5a9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:44:51,877 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@30b562f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/hadoop.log.dir/,STOPPED} 2024-11-18T22:44:51,879 WARN [BP-41218721-172.17.0.2-1731969831579 heartbeating to localhost/127.0.0.1:41707 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:44:51,879 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:44:51,879 WARN [BP-41218721-172.17.0.2-1731969831579 heartbeating to localhost/127.0.0.1:41707 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-41218721-172.17.0.2-1731969831579 (Datanode Uuid 2f0b7112-0e90-49df-8dcf-68ef96070da1) service to localhost/127.0.0.1:41707 2024-11-18T22:44:51,879 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:44:51,880 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58/data/data3/current/BP-41218721-172.17.0.2-1731969831579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:44:51,880 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58/data/data4/current/BP-41218721-172.17.0.2-1731969831579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:44:51,881 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:44:51,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@dac4784{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:44:51,894 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f8c1b5d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:44:51,894 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:44:51,894 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58200445{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:44:51,894 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65618dbb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/hadoop.log.dir/,STOPPED} 2024-11-18T22:44:51,897 WARN [BP-41218721-172.17.0.2-1731969831579 heartbeating to localhost/127.0.0.1:41707 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:44:51,897 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:44:51,897 WARN [BP-41218721-172.17.0.2-1731969831579 heartbeating to localhost/127.0.0.1:41707 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-41218721-172.17.0.2-1731969831579 (Datanode Uuid ae5d9546-f9dd-41e6-b147-bd70c34f98ee) service to localhost/127.0.0.1:41707 2024-11-18T22:44:51,897 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:44:51,898 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58/data/data2/current/BP-41218721-172.17.0.2-1731969831579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:44:51,898 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:44:51,900 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/cluster_08859a72-e1c7-f223-88c0-d524e7973c58/data/data1/current/BP-41218721-172.17.0.2-1731969831579 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:44:51,908 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@97a3b39{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:44:51,909 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14cf53f6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:44:51,909 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:44:51,909 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2898a7f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:44:51,909 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dae72b0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/hadoop.log.dir/,STOPPED} 2024-11-18T22:44:51,922 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T22:44:51,947 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T22:44:51,959 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=204 (was 181) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41707 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:41707 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41707 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:41707 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41707 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:41707 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:41707 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41707 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=949 (was 960), ProcessCount=11 (was 11), AvailableMemoryMB=5353 (was 6780) 2024-11-18T22:44:51,970 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=204, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=949, ProcessCount=11, AvailableMemoryMB=5353 2024-11-18T22:44:51,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T22:44:51,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/hadoop.log.dir so I do NOT create it in target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46 2024-11-18T22:44:51,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/986a3409-6bf2-3da5-6c71-b9c81d158ea2/hadoop.tmp.dir so I do NOT create it in target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46 2024-11-18T22:44:51,971 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee, deleteOnExit=true 2024-11-18T22:44:51,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T22:44:51,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/test.cache.data in system properties and HBase conf 2024-11-18T22:44:51,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T22:44:51,971 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/hadoop.log.dir in system properties and HBase conf 2024-11-18T22:44:51,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T22:44:51,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T22:44:51,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T22:44:51,972 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-18T22:44:51,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:44:51,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:44:51,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T22:44:51,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:44:51,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T22:44:51,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T22:44:51,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:44:51,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:44:51,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T22:44:51,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/nfs.dump.dir in system properties and HBase conf 2024-11-18T22:44:51,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/java.io.tmpdir in system properties and HBase conf 2024-11-18T22:44:51,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:44:51,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T22:44:51,973 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T22:44:51,989 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:44:52,046 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:52,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:52,377 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:44:52,384 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:44:52,388 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:44:52,388 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:44:52,388 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:44:52,389 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:44:52,389 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55409da5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:44:52,389 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1215a9cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:44:52,496 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b89aa23{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/java.io.tmpdir/jetty-localhost-42289-hadoop-hdfs-3_4_1-tests_jar-_-any-4165599196578723417/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:44:52,496 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1cbb9cb0{HTTP/1.1, (http/1.1)}{localhost:42289} 2024-11-18T22:44:52,496 INFO [Time-limited test {}] server.Server(415): Started @286296ms 2024-11-18T22:44:52,509 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:44:52,791 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:44:52,795 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:44:52,796 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:44:52,796 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:44:52,796 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:44:52,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f5cb74f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:44:52,798 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@722ee419{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:44:52,812 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:52,934 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1370e23f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/java.io.tmpdir/jetty-localhost-40639-hadoop-hdfs-3_4_1-tests_jar-_-any-5505881682091410968/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:44:52,936 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2cf5c358{HTTP/1.1, (http/1.1)}{localhost:40639} 2024-11-18T22:44:52,936 INFO [Time-limited test {}] server.Server(415): Started @286736ms 2024-11-18T22:44:52,940 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:44:52,991 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:44:52,995 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:44:53,001 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:44:53,001 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:44:53,001 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:44:53,001 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9bd0638{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:44:53,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bdfb606{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:44:53,047 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:53,116 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@33cca936{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/java.io.tmpdir/jetty-localhost-35603-hadoop-hdfs-3_4_1-tests_jar-_-any-1835659516313533616/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:44:53,117 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f9d87e6{HTTP/1.1, (http/1.1)}{localhost:35603} 2024-11-18T22:44:53,117 INFO [Time-limited test {}] server.Server(415): Started @286917ms 2024-11-18T22:44:53,118 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:44:53,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:53,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:54,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:54,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:54,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,703 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,729 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:54,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:54,851 WARN [Thread-1990 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee/data/data1/current/BP-1003182609-172.17.0.2-1731969892022/current, will proceed with Du for space computation calculation, 2024-11-18T22:44:54,852 WARN [Thread-1991 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee/data/data2/current/BP-1003182609-172.17.0.2-1731969892022/current, will proceed with Du for space computation calculation, 2024-11-18T22:44:54,893 WARN [Thread-1955 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T22:44:54,896 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb9fbd4ab7ea75dd with lease ID 0xfdff043e48ef47f5: Processing first storage report for DS-d4915a39-14bb-46ba-a78d-7c003f4de1a5 from datanode DatanodeRegistration(127.0.0.1:43435, datanodeUuid=9415e542-9dc7-4dee-a16f-5d5c5eabb9a1, infoPort=40677, infoSecurePort=0, ipcPort=37403, storageInfo=lv=-57;cid=testClusterID;nsid=1161940854;c=1731969892022) 2024-11-18T22:44:54,896 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb9fbd4ab7ea75dd with lease ID 0xfdff043e48ef47f5: from storage DS-d4915a39-14bb-46ba-a78d-7c003f4de1a5 node DatanodeRegistration(127.0.0.1:43435, datanodeUuid=9415e542-9dc7-4dee-a16f-5d5c5eabb9a1, infoPort=40677, infoSecurePort=0, ipcPort=37403, storageInfo=lv=-57;cid=testClusterID;nsid=1161940854;c=1731969892022), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:44:54,896 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb9fbd4ab7ea75dd with lease ID 0xfdff043e48ef47f5: Processing first storage report for DS-63d460ea-6783-4b5a-ba1b-e66995adac0c from datanode DatanodeRegistration(127.0.0.1:43435, datanodeUuid=9415e542-9dc7-4dee-a16f-5d5c5eabb9a1, infoPort=40677, infoSecurePort=0, ipcPort=37403, storageInfo=lv=-57;cid=testClusterID;nsid=1161940854;c=1731969892022) 2024-11-18T22:44:54,896 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb9fbd4ab7ea75dd with lease ID 0xfdff043e48ef47f5: from storage DS-63d460ea-6783-4b5a-ba1b-e66995adac0c node DatanodeRegistration(127.0.0.1:43435, datanodeUuid=9415e542-9dc7-4dee-a16f-5d5c5eabb9a1, infoPort=40677, infoSecurePort=0, ipcPort=37403, storageInfo=lv=-57;cid=testClusterID;nsid=1161940854;c=1731969892022), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:44:55,048 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at 
jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:55,119 WARN [Thread-2002 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee/data/data3/current/BP-1003182609-172.17.0.2-1731969892022/current, will proceed with Du for space computation calculation, 2024-11-18T22:44:55,119 WARN [Thread-2003 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee/data/data4/current/BP-1003182609-172.17.0.2-1731969892022/current, will proceed with Du for space computation calculation, 2024-11-18T22:44:55,142 WARN [Thread-1978 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1
2024-11-18T22:44:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x818bd47298b058ba with lease ID 0xfdff043e48ef47f6: Processing first storage report for DS-51de98e0-fcb4-424f-8d02-fa3101330b24 from datanode DatanodeRegistration(127.0.0.1:39621, datanodeUuid=2cde33cc-72ad-4f80-8284-45620e735d2d, infoPort=43885, infoSecurePort=0, ipcPort=43957, storageInfo=lv=-57;cid=testClusterID;nsid=1161940854;c=1731969892022)
2024-11-18T22:44:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x818bd47298b058ba with lease ID 0xfdff043e48ef47f6: from storage DS-51de98e0-fcb4-424f-8d02-fa3101330b24 node DatanodeRegistration(127.0.0.1:39621, datanodeUuid=2cde33cc-72ad-4f80-8284-45620e735d2d, infoPort=43885, infoSecurePort=0, ipcPort=43957, storageInfo=lv=-57;cid=testClusterID;nsid=1161940854;c=1731969892022), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-18T22:44:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x818bd47298b058ba with lease ID 0xfdff043e48ef47f6: Processing first storage report for DS-a83c0fa3-b4e1-4bba-ad35-22c1957e9a1a from datanode DatanodeRegistration(127.0.0.1:39621, datanodeUuid=2cde33cc-72ad-4f80-8284-45620e735d2d, infoPort=43885, infoSecurePort=0, ipcPort=43957, storageInfo=lv=-57;cid=testClusterID;nsid=1161940854;c=1731969892022)
2024-11-18T22:44:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x818bd47298b058ba with lease ID 0xfdff043e48ef47f6: from storage DS-a83c0fa3-b4e1-4bba-ad35-22c1957e9a1a node DatanodeRegistration(127.0.0.1:39621, datanodeUuid=2cde33cc-72ad-4f80-8284-45620e735d2d, infoPort=43885, infoSecurePort=0, ipcPort=43957, storageInfo=lv=-57;cid=testClusterID;nsid=1161940854;c=1731969892022), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-18T22:44:55,195 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46
2024-11-18T22:44:55,202 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee/zookeeper_0, clientPort=51879, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-18T22:44:55,204 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51879
2024-11-18T22:44:55,205 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-18T22:44:55,206
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:44:55,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:44:55,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:44:55,223 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37 with version=8 2024-11-18T22:44:55,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/hbase-staging 2024-11-18T22:44:55,226 INFO [Time-limited test {}] client.ConnectionUtils(128): master/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:44:55,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:44:55,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:44:55,226 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:44:55,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:44:55,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:44:55,226 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T22:44:55,226 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:44:55,227 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41377 2024-11-18T22:44:55,229 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41377 connecting to ZooKeeper ensemble=127.0.0.1:51879 2024-11-18T22:44:55,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:55,245 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:44:55,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,247 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:413770x0, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:44:55,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,285 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,286 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:44:55,288 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41377-0x101501f46cc0000 connected 2024-11-18T22:44:55,363 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:44:55,365 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:44:55,371 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:44:55,371 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37, hbase.cluster.distributed=false 2024-11-18T22:44:55,373 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:44:55,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41377 2024-11-18T22:44:55,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41377 2024-11-18T22:44:55,374 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41377 2024-11-18T22:44:55,375 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, 
numCallQueues=1, port=41377 2024-11-18T22:44:55,375 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41377 2024-11-18T22:44:55,393 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:44:55,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:44:55,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:44:55,393 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:44:55,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:44:55,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:44:55,393 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T22:44:55,393 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:44:55,394 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33617 2024-11-18T22:44:55,396 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33617 connecting to ZooKeeper ensemble=127.0.0.1:51879 2024-11-18T22:44:55,396 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:44:55,398 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:44:55,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:336170x0, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:44:55,405 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33617-0x101501f46cc0001 connected 2024-11-18T22:44:55,405 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:44:55,406 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T22:44:55,406 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T22:44:55,407 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33617-0x101501f46cc0001, 
quorum=127.0.0.1:51879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T22:44:55,407 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:44:55,408 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33617 2024-11-18T22:44:55,408 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33617 2024-11-18T22:44:55,408 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33617 2024-11-18T22:44:55,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33617 2024-11-18T22:44:55,409 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33617 2024-11-18T22:44:55,425 DEBUG [M:0;3b0ca91ed64a:41377 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3b0ca91ed64a:41377 2024-11-18T22:44:55,425 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3b0ca91ed64a,41377,1731969895226 2024-11-18T22:44:55,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:44:55,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:44:55,437 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3b0ca91ed64a,41377,1731969895226 2024-11-18T22:44:55,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T22:44:55,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:55,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:55,448 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T22:44:55,448 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3b0ca91ed64a,41377,1731969895226 from backup master directory 2024-11-18T22:44:55,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-18T22:44:55,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3b0ca91ed64a,41377,1731969895226
2024-11-18T22:44:55,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-18T22:44:55,458 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-18T22:44:55,458 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3b0ca91ed64a,41377,1731969895226
2024-11-18T22:44:55,461 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/hbase.id] with ID: 9690eb8d-57e0-4cdf-b429-8ba30b84ea3c
2024-11-18T22:44:55,462 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/.tmp/hbase.id
2024-11-18T22:44:55,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741826_1002 (size=42)
2024-11-18T22:44:55,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741826_1002 (size=42)
2024-11-18T22:44:55,468 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/.tmp/hbase.id]:[hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/hbase.id]
2024-11-18T22:44:55,479 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-18T22:44:55,479 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem.
2024-11-18T22:44:55,481 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms.
2024-11-18T22:44:55,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:55,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:55,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:44:55,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:44:55,500 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:44:55,501 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T22:44:55,501 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:44:55,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:44:55,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:44:55,509 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store 2024-11-18T22:44:55,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:44:55,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:44:55,516 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:44:55,516 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:44:55,516 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:44:55,516 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:44:55,516 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:44:55,516 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:44:55,516 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T22:44:55,516 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969895516Disabling compacts and flushes for region at 1731969895516Disabling writes for close at 1731969895516Writing region close event to WAL at 1731969895516Closed at 1731969895516 2024-11-18T22:44:55,517 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/.initializing 2024-11-18T22:44:55,517 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/WALs/3b0ca91ed64a,41377,1731969895226 2024-11-18T22:44:55,519 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C41377%2C1731969895226, suffix=, logDir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/WALs/3b0ca91ed64a,41377,1731969895226, archiveDir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/oldWALs, maxLogs=10 2024-11-18T22:44:55,520 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C41377%2C1731969895226.1731969895520 2024-11-18T22:44:55,527 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/WALs/3b0ca91ed64a,41377,1731969895226/3b0ca91ed64a%2C41377%2C1731969895226.1731969895520 2024-11-18T22:44:55,528 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43885:43885),(127.0.0.1/127.0.0.1:40677:40677)] 2024-11-18T22:44:55,532 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:44:55,533 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:44:55,533 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,533 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,534 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,536 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T22:44:55,536 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:55,536 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:44:55,536 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,538 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T22:44:55,538 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:55,538 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:44:55,538 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,539 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T22:44:55,539 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:55,540 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:44:55,540 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,541 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T22:44:55,541 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:55,542 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:44:55,542 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,542 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,543 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,544 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,544 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,544 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T22:44:55,546 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:44:55,548 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:44:55,548 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=759860, jitterRate=-0.03378920257091522}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T22:44:55,549 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731969895533Initializing all the Stores at 1731969895534 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969895534Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969895534Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969895534Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969895534Cleaning up temporary data from old regions at 1731969895544 (+10 ms)Region opened successfully at 1731969895549 (+5 ms) 2024-11-18T22:44:55,549 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T22:44:55,552 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16b777, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0
2024-11-18T22:44:55,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-18T22:44:55,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-18T22:44:55,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-18T22:44:55,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-18T22:44:55,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-18T22:44:55,554 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-18T22:44:55,554 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-18T22:44:55,556 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-18T22:44:55,557 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-18T22:44:55,563 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-18T22:44:55,563 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-18T22:44:55,564 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-18T22:44:55,573 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-18T22:44:55,574 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-18T22:44:55,575 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-18T22:44:55,584 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-18T22:44:55,585 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-18T22:44:55,594 DEBUG
[master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T22:44:55,597 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T22:44:55,605 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T22:44:55,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:44:55,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:44:55,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:55,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:55,616 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3b0ca91ed64a,41377,1731969895226, sessionid=0x101501f46cc0000, setting cluster-up flag (Was=false) 2024-11-18T22:44:55,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:55,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:55,668 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T22:44:55,670 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,41377,1731969895226 2024-11-18T22:44:55,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:55,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:55,721 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T22:44:55,722 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,41377,1731969895226 2024-11-18T22:44:55,723 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T22:44:55,725 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T22:44:55,725 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T22:44:55,725 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T22:44:55,726 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3b0ca91ed64a,41377,1731969895226 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T22:44:55,727 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:44:55,727 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:44:55,727 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:44:55,727 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:44:55,727 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3b0ca91ed64a:0, corePoolSize=10, maxPoolSize=10 2024-11-18T22:44:55,727 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,727 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:44:55,727 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T22:44:55,731 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:44:55,731 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T22:44:55,733 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:55,733 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T22:44:55,750 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731969925749 2024-11-18T22:44:55,750 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T22:44:55,750 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T22:44:55,750 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T22:44:55,750 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T22:44:55,750 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T22:44:55,750 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T22:44:55,751 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:55,751 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T22:44:55,751 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T22:44:55,751 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T22:44:55,752 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T22:44:55,752 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T22:44:55,752 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969895752,5,FailOnTimeoutGroup] 2024-11-18T22:44:55,752 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969895752,5,FailOnTimeoutGroup] 2024-11-18T22:44:55,752 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:55,752 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T22:44:55,752 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:55,752 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
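
The cleaner chores started above are built from pluggable delegate chains: each class the log lists (TimeToLiveLogCleaner, ReplicationLogCleaner, TimeToLiveHFileCleaner, SnapshotHFileCleaner, HFileLinkCleaner, ...) is consulted before an old WAL or HFile is actually deleted. A minimal sketch of how such chains are typically wired up through Configuration follows; the property keys hbase.master.logcleaner.plugins and hbase.master.hfilecleaner.plugins are standard HBase settings assumed here, not values printed anywhere in this log.

    // Hedged sketch: assembling the cleaner plugin chains named in the log
    // entries above. Property keys are assumed standard HBase keys, not
    // taken from this log output.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Comma-separated delegate classes; each is instantiated by the
        // CleanerChore and asked whether a given old WAL is deletable.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
          + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
        // Same idea for archived HFiles.
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
          + "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner,"
          + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
        System.out.println(conf.get("hbase.master.hfilecleaner.plugins"));
      }
    }
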
2024-11-18T22:44:55,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:44:55,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:44:55,754 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T22:44:55,754 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37 2024-11-18T22:44:55,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:44:55,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:44:55,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:44:55,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:44:55,767 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:44:55,767 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:55,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:44:55,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:44:55,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:44:55,769 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:55,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:44:55,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:44:55,770 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:44:55,770 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:55,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:44:55,771 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:44:55,771 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:44:55,772 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:55,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:44:55,772 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:44:55,773 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740 2024-11-18T22:44:55,773 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740 2024-11-18T22:44:55,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:44:55,774 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:44:55,774 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
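
The FlushLargeStoresPolicy entry above reports the fallback applied when hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor: the per-family flush lower bound becomes the region's memstore flush size divided by the number of column families. A short worked sketch of that arithmetic, using the sizes visible in this log; the 64 MB figure for hbase:meta is inferred from 16 MB x 4 families rather than printed directly.

    // Hedged sketch of the fallback arithmetic reported by
    // FlushLargeStoresPolicy above. Constants are the values visible in
    // this log, not read from a live cluster.
    public class FlushLowerBoundSketch {
      static long fallbackLowerBound(long memstoreFlushSize, int numFamilies) {
        return memstoreFlushSize / numFamilies;
      }

      public static void main(String[] args) {
        // hbase:meta: 4 families (info, ns, rep_barrier, table),
        // assumed 64 MB flush size -> 16777216 (16 MB), matching
        // FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
        System.out.println(fallbackLowerBound(64L * 1024 * 1024, 4));
        // master local store: 4 families (info, proc, rs, state),
        // flushSize=134217728 (128 MB) -> 33554432 (32 MB), matching
        // FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
        System.out.println(fallbackLowerBound(128L * 1024 * 1024, 4));
      }
    }
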
2024-11-18T22:44:55,775 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:44:55,778 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:44:55,779 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=693195, jitterRate=-0.1185579001903534}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:44:55,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731969895765Initializing all the Stores at 1731969895766 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969895766Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969895766Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969895766Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969895766Cleaning up temporary data from old regions at 1731969895774 (+8 ms)Region opened successfully at 1731969895779 (+5 ms) 2024-11-18T22:44:55,779 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:44:55,779 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:44:55,780 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:44:55,780 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:44:55,780 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:44:55,780 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:44:55,780 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969895779Disabling compacts and flushes for region at 1731969895779Disabling writes for close at 1731969895780 (+1 ms)Writing 
region close event to WAL at 1731969895780Closed at 1731969895780 2024-11-18T22:44:55,782 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:44:55,782 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T22:44:55,782 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T22:44:55,784 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:44:55,786 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T22:44:55,811 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(746): ClusterId : 9690eb8d-57e0-4cdf-b429-8ba30b84ea3c 2024-11-18T22:44:55,811 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T22:44:55,814 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:55,816 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T22:44:55,817 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T22:44:55,827 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T22:44:55,827 DEBUG [RS:0;3b0ca91ed64a:33617 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c016bb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:44:55,843 DEBUG [RS:0;3b0ca91ed64a:33617 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3b0ca91ed64a:33617 2024-11-18T22:44:55,843 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T22:44:55,843 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T22:44:55,843 DEBUG [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-18T22:44:55,844 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(2659): reportForDuty to master=3b0ca91ed64a,41377,1731969895226 with port=33617, startcode=1731969895393 2024-11-18T22:44:55,844 DEBUG [RS:0;3b0ca91ed64a:33617 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T22:44:55,846 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38635, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T22:44:55,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41377 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3b0ca91ed64a,33617,1731969895393 2024-11-18T22:44:55,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41377 {}] master.ServerManager(517): Registering regionserver=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:44:55,849 DEBUG [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37 2024-11-18T22:44:55,849 DEBUG [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41169 2024-11-18T22:44:55,849 DEBUG [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T22:44:55,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:44:55,858 DEBUG [RS:0;3b0ca91ed64a:33617 {}] zookeeper.ZKUtil(111): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Set watcher on existing 
znode=/hbase/rs/3b0ca91ed64a,33617,1731969895393 2024-11-18T22:44:55,858 WARN [RS:0;3b0ca91ed64a:33617 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T22:44:55,858 INFO [RS:0;3b0ca91ed64a:33617 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:44:55,858 DEBUG [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393 2024-11-18T22:44:55,861 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3b0ca91ed64a,33617,1731969895393] 2024-11-18T22:44:55,863 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T22:44:55,864 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T22:44:55,864 INFO [RS:0;3b0ca91ed64a:33617 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T22:44:55,865 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:55,865 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T22:44:55,866 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T22:44:55,866 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
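
The MemStoreFlusher entry above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is consistent with the usual defaults of 0.4 of heap for the global limit and 0.95 of that limit for the low-water mark; those two factors are assumptions drawn from standard HBase configuration defaults (hbase.regionserver.global.memstore.size and its lower.limit), not values printed in this log. A small sketch of the arithmetic:

    // Hedged sketch of how the reported memstore limits typically relate
    // to heap size. The 0.4 and 0.95 factors are assumed defaults.
    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        double globalLimitMb = 880.0;            // from the log line above
        double lowerLimitFactor = 836.0 / 880.0; // == 0.95, matching the default
        double impliedHeapMb = globalLimitMb / 0.4;
        System.out.printf("lower-limit factor = %.2f, implied heap ~ %.0f MB%n",
            lowerLimitFactor, impliedHeapMb);    // 0.95, ~2200 MB
      }
    }
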
2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:44:55,867 DEBUG [RS:0;3b0ca91ed64a:33617 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:44:55,868 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:55,868 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:55,868 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:55,868 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
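
Each executor.ExecutorService entry above starts a fixed-size pool (corePoolSize equals maxPoolSize). Purely as an illustration, and not HBase's own executor wrapper, an equivalent plain java.util.concurrent pool for RS_OPEN_REGION (corePoolSize=1, maxPoolSize=1) could be sketched like this:

    // Illustrative only: a fixed-size pool with the same sizing as the
    // RS_OPEN_REGION executor logged above. HBase's ExecutorService adds
    // naming, metrics and event handling on top; none of that is shown here.
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FixedPoolSketch {
      public static void main(String[] args) {
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.allowCoreThreadTimeOut(true); // let idle workers exit
        openRegionPool.execute(() -> System.out.println("open-region task"));
        openRegionPool.shutdown();
      }
    }
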
2024-11-18T22:44:55,868 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:55,868 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,33617,1731969895393-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:44:55,885 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T22:44:55,885 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,33617,1731969895393-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:55,886 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:55,886 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.Replication(171): 3b0ca91ed64a,33617,1731969895393 started 2024-11-18T22:44:55,903 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:55,903 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(1482): Serving as 3b0ca91ed64a,33617,1731969895393, RpcServer on 3b0ca91ed64a/172.17.0.2:33617, sessionid=0x101501f46cc0001 2024-11-18T22:44:55,903 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T22:44:55,903 DEBUG [RS:0;3b0ca91ed64a:33617 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3b0ca91ed64a,33617,1731969895393 2024-11-18T22:44:55,903 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,33617,1731969895393' 2024-11-18T22:44:55,903 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T22:44:55,903 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T22:44:55,904 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T22:44:55,904 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T22:44:55,904 DEBUG [RS:0;3b0ca91ed64a:33617 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3b0ca91ed64a,33617,1731969895393 2024-11-18T22:44:55,904 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,33617,1731969895393' 2024-11-18T22:44:55,904 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T22:44:55,904 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T22:44:55,905 DEBUG [RS:0;3b0ca91ed64a:33617 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T22:44:55,905 INFO [RS:0;3b0ca91ed64a:33617 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T22:44:55,905 INFO [RS:0;3b0ca91ed64a:33617 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-18T22:44:55,936 WARN [3b0ca91ed64a:41377 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T22:44:56,007 INFO [RS:0;3b0ca91ed64a:33617 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C33617%2C1731969895393, suffix=, logDir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393, archiveDir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/oldWALs, maxLogs=32 2024-11-18T22:44:56,008 INFO [RS:0;3b0ca91ed64a:33617 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C33617%2C1731969895393.1731969896008 2024-11-18T22:44:56,016 INFO [RS:0;3b0ca91ed64a:33617 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393/3b0ca91ed64a%2C33617%2C1731969895393.1731969896008 2024-11-18T22:44:56,017 DEBUG [RS:0;3b0ca91ed64a:33617 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43885:43885),(127.0.0.1/127.0.0.1:40677:40677)] 2024-11-18T22:44:56,049 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:44:56,186 DEBUG [3b0ca91ed64a:41377 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T22:44:56,187 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:44:56,188 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,33617,1731969895393, state=OPENING 2024-11-18T22:44:56,215 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T22:44:56,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:56,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:44:56,227 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:44:56,227 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:44:56,227 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:44:56,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,33617,1731969895393}] 2024-11-18T22:44:56,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:56,381 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T22:44:56,382 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35835, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T22:44:56,385 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T22:44:56,385 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:44:56,387 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C33617%2C1731969895393.meta, suffix=.meta, logDir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393, archiveDir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/oldWALs, maxLogs=32 2024-11-18T22:44:56,387 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C33617%2C1731969895393.meta.1731969896387.meta 2024-11-18T22:44:56,392 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393/3b0ca91ed64a%2C33617%2C1731969895393.meta.1731969896387.meta 2024-11-18T22:44:56,392 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43885:43885),(127.0.0.1/127.0.0.1:40677:40677)] 2024-11-18T22:44:56,393 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:44:56,394 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T22:44:56,394 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T22:44:56,394 
INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-18T22:44:56,394 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T22:44:56,394 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:44:56,394 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T22:44:56,394 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T22:44:56,395 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:44:56,396 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:44:56,396 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:56,397 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:44:56,397 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:44:56,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:44:56,398 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:56,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:44:56,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:44:56,400 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:44:56,400 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:56,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:44:56,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:44:56,401 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:44:56,401 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:56,402 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:44:56,402 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:44:56,403 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740 2024-11-18T22:44:56,404 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740 2024-11-18T22:44:56,405 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:44:56,405 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:44:56,406 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-18T22:44:56,407 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:44:56,407 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=766390, jitterRate=-0.025485798716545105}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:44:56,407 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T22:44:56,408 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731969896394Writing region info on filesystem at 1731969896394Initializing all the Stores at 1731969896395 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969896395Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969896395Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING 
=> 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969896395Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969896395Cleaning up temporary data from old regions at 1731969896405 (+10 ms)Running coprocessor post-open hooks at 1731969896408 (+3 ms)Region opened successfully at 1731969896408 2024-11-18T22:44:56,409 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731969896380 2024-11-18T22:44:56,411 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T22:44:56,411 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T22:44:56,412 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:44:56,413 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,33617,1731969895393, state=OPEN 2024-11-18T22:44:56,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:44:56,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:44:56,532 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:44:56,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:44:56,532 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:44:56,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T22:44:56,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,33617,1731969895393 in 305 msec 2024-11-18T22:44:56,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T22:44:56,537 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 753 msec 2024-11-18T22:44:56,538 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:44:56,538 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T22:44:56,539 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:44:56,539 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,33617,1731969895393, seqNum=-1] 2024-11-18T22:44:56,539 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:44:56,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53747, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:44:56,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 823 msec 2024-11-18T22:44:56,550 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731969896550, completionTime=-1 2024-11-18T22:44:56,550 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T22:44:56,550 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T22:44:56,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T22:44:56,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731969956553 2024-11-18T22:44:56,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731970016553 2024-11-18T22:44:56,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T22:44:56,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,41377,1731969895226-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:56,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,41377,1731969895226-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:56,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,41377,1731969895226-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T22:44:56,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3b0ca91ed64a:41377, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:56,553 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:56,556 DEBUG [master/3b0ca91ed64a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T22:44:56,556 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T22:44:56,560 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.102sec 2024-11-18T22:44:56,561 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T22:44:56,561 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T22:44:56,561 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T22:44:56,561 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-18T22:44:56,561 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T22:44:56,561 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,41377,1731969895226-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:44:56,561 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,41377,1731969895226-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T22:44:56,563 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T22:44:56,563 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T22:44:56,564 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,41377,1731969895226-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-18T22:44:56,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@701b0256, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:44:56,612 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3b0ca91ed64a,41377,-1 for getting cluster id 2024-11-18T22:44:56,612 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T22:44:56,614 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9690eb8d-57e0-4cdf-b429-8ba30b84ea3c' 2024-11-18T22:44:56,614 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T22:44:56,614 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9690eb8d-57e0-4cdf-b429-8ba30b84ea3c" 2024-11-18T22:44:56,614 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b5783b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:44:56,617 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3b0ca91ed64a,41377,-1] 2024-11-18T22:44:56,617 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T22:44:56,617 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:44:56,618 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33306, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T22:44:56,619 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46d056a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:44:56,620 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:44:56,622 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,33617,1731969895393, seqNum=-1] 2024-11-18T22:44:56,622 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:44:56,624 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56650, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:44:56,626 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3b0ca91ed64a,41377,1731969895226 2024-11-18T22:44:56,627 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:44:56,630 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T22:44:56,631 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-18T22:44:56,632 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 3b0ca91ed64a,41377,1731969895226 2024-11-18T22:44:56,632 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5d9250c9 2024-11-18T22:44:56,632 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-18T22:44:56,633 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33320, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-18T22:44:56,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-18T22:44:56,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-18T22:44:56,634 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:44:56,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-18T22:44:56,640 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-18T22:44:56,641 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:56,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-18T22:44:56,642 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-18T22:44:56,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T22:44:56,649 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741835_1011 (size=381) 2024-11-18T22:44:56,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741835_1011 (size=381) 2024-11-18T22:44:56,651 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0cda5b537c5711031301b341cec171a9, NAME => 'TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37 2024-11-18T22:44:56,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741836_1012 (size=64) 2024-11-18T22:44:56,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741836_1012 (size=64) 2024-11-18T22:44:56,658 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:44:56,658 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 0cda5b537c5711031301b341cec171a9, disabling compactions & flushes 2024-11-18T22:44:56,658 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 2024-11-18T22:44:56,658 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 2024-11-18T22:44:56,658 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. after waiting 0 ms 2024-11-18T22:44:56,658 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 2024-11-18T22:44:56,658 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 
2024-11-18T22:44:56,658 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0cda5b537c5711031301b341cec171a9: Waiting for close lock at 1731969896658Disabling compacts and flushes for region at 1731969896658Disabling writes for close at 1731969896658Writing region close event to WAL at 1731969896658Closed at 1731969896658 2024-11-18T22:44:56,660 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-18T22:44:56,660 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731969896660"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731969896660"}]},"ts":"1731969896660"} 2024-11-18T22:44:56,662 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-18T22:44:56,664 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-18T22:44:56,664 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731969896664"}]},"ts":"1731969896664"} 2024-11-18T22:44:56,666 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-18T22:44:56,666 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0cda5b537c5711031301b341cec171a9, ASSIGN}] 2024-11-18T22:44:56,668 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0cda5b537c5711031301b341cec171a9, ASSIGN 2024-11-18T22:44:56,669 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0cda5b537c5711031301b341cec171a9, ASSIGN; state=OFFLINE, location=3b0ca91ed64a,33617,1731969895393; forceNewPlan=false, retain=false 2024-11-18T22:44:56,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:56,819 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0cda5b537c5711031301b341cec171a9, regionState=OPENING, regionLocation=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:44:56,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0cda5b537c5711031301b341cec171a9, ASSIGN because future has completed 2024-11-18T22:44:56,822 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0cda5b537c5711031301b341cec171a9, server=3b0ca91ed64a,33617,1731969895393}] 2024-11-18T22:44:56,982 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 
2024-11-18T22:44:56,982 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0cda5b537c5711031301b341cec171a9, NAME => 'TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:44:56,983 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 0cda5b537c5711031301b341cec171a9 2024-11-18T22:44:56,983 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:44:56,983 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0cda5b537c5711031301b341cec171a9 2024-11-18T22:44:56,983 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0cda5b537c5711031301b341cec171a9 2024-11-18T22:44:56,992 INFO [StoreOpener-0cda5b537c5711031301b341cec171a9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0cda5b537c5711031301b341cec171a9 2024-11-18T22:44:56,994 INFO [StoreOpener-0cda5b537c5711031301b341cec171a9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0cda5b537c5711031301b341cec171a9 columnFamilyName info 2024-11-18T22:44:56,994 DEBUG [StoreOpener-0cda5b537c5711031301b341cec171a9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:44:56,995 INFO [StoreOpener-0cda5b537c5711031301b341cec171a9-1 {}] regionserver.HStore(327): Store=0cda5b537c5711031301b341cec171a9/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:44:56,995 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0cda5b537c5711031301b341cec171a9 2024-11-18T22:44:56,996 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9 2024-11-18T22:44:56,997 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9 2024-11-18T22:44:56,997 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0cda5b537c5711031301b341cec171a9 2024-11-18T22:44:56,998 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0cda5b537c5711031301b341cec171a9 2024-11-18T22:44:57,002 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0cda5b537c5711031301b341cec171a9 2024-11-18T22:44:57,007 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:44:57,008 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0cda5b537c5711031301b341cec171a9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817336, jitterRate=0.03929772973060608}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T22:44:57,008 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0cda5b537c5711031301b341cec171a9 2024-11-18T22:44:57,009 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0cda5b537c5711031301b341cec171a9: Running coprocessor pre-open hook at 1731969896983Writing region info on filesystem at 1731969896983Initializing all the Stores at 1731969896985 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969896985Cleaning up temporary data from old regions at 1731969896998 (+13 ms)Running coprocessor post-open hooks at 1731969897008 (+10 ms)Region opened successfully at 1731969897009 (+1 ms) 2024-11-18T22:44:57,012 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., pid=6, masterSystemTime=1731969896976 2024-11-18T22:44:57,015 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 
2024-11-18T22:44:57,015 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 2024-11-18T22:44:57,016 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0cda5b537c5711031301b341cec171a9, regionState=OPEN, openSeqNum=2, regionLocation=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:44:57,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0cda5b537c5711031301b341cec171a9, server=3b0ca91ed64a,33617,1731969895393 because future has completed 2024-11-18T22:44:57,029 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-18T22:44:57,029 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0cda5b537c5711031301b341cec171a9, server=3b0ca91ed64a,33617,1731969895393 in 204 msec 2024-11-18T22:44:57,033 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-18T22:44:57,033 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0cda5b537c5711031301b341cec171a9, ASSIGN in 364 msec 2024-11-18T22:44:57,034 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-18T22:44:57,035 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731969897034"}]},"ts":"1731969897034"} 2024-11-18T22:44:57,041 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-18T22:44:57,042 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-18T22:44:57,049 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 408 msec 2024-11-18T22:44:57,050 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:57,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-18T22:44:57,236 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-18T22:44:57,237 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-18T22:44:57,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:57,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:58,051 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:58,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:58,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:59,052 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:59,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:44:59,817 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:00,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:00,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:00,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:01,053 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:01,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:01,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:01,896 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:45:01,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,898 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,926 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,932 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:01,942 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-18T22:45:01,943 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-18T22:45:02,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:02,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:02,819 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:03,054 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:03,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:03,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:04,055 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:04,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:04,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:05,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:05,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:05,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:06,056 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:06,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:06,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41377 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-18T22:45:06,734 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-18T22:45:06,734 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-18T22:45:06,737 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-18T22:45:06,737 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 
2024-11-18T22:45:06,745 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., hostname=3b0ca91ed64a,33617,1731969895393, seqNum=2] 2024-11-18T22:45:06,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:06,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0cda5b537c5711031301b341cec171a9 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:45:06,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/6727dafee4fa4028a06ad6a0311566f2 is 1080, key is row0001/info:/1731969906746/Put/seqid=0 2024-11-18T22:45:06,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741837_1013 (size=12509) 2024-11-18T22:45:06,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741837_1013 (size=12509) 2024-11-18T22:45:06,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/6727dafee4fa4028a06ad6a0311566f2 2024-11-18T22:45:06,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:06,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/6727dafee4fa4028a06ad6a0311566f2 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/6727dafee4fa4028a06ad6a0311566f2 2024-11-18T22:45:06,831 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/6727dafee4fa4028a06ad6a0311566f2, entries=7, sequenceid=11, filesize=12.2 K 2024-11-18T22:45:06,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=22.07 KB/22596 for 0cda5b537c5711031301b341cec171a9 in 70ms, sequenceid=11, compaction requested=false 2024-11-18T22:45:06,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0cda5b537c5711031301b341cec171a9: 2024-11-18T22:45:06,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:06,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0cda5b537c5711031301b341cec171a9 1/1 column families, dataSize=23.12 KB heapSize=25 KB 2024-11-18T22:45:06,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/b64abeba5fce4effbbfe701b1dcd4d5d is 1080, key is row0008/info:/1731969906764/Put/seqid=0 2024-11-18T22:45:06,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741838_1014 (size=28684) 2024-11-18T22:45:06,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=23.12 KB at sequenceid=36 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/b64abeba5fce4effbbfe701b1dcd4d5d 2024-11-18T22:45:06,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741838_1014 (size=28684) 2024-11-18T22:45:06,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/b64abeba5fce4effbbfe701b1dcd4d5d as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/b64abeba5fce4effbbfe701b1dcd4d5d 2024-11-18T22:45:06,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/b64abeba5fce4effbbfe701b1dcd4d5d, entries=22, sequenceid=36, filesize=28.0 K 2024-11-18T22:45:06,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.12 KB/23672, heapSize ~24.98 KB/25584, currentSize=3.15 KB/3228 for 0cda5b537c5711031301b341cec171a9 in 59ms, sequenceid=36, compaction requested=false 2024-11-18T22:45:06,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0cda5b537c5711031301b341cec171a9: 2024-11-18T22:45:06,893 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.2 K, sizeToCheck=16.0 K 2024-11-18T22:45:06,893 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:06,893 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/b64abeba5fce4effbbfe701b1dcd4d5d because midkey is the same as first or last row 2024-11-18T22:45:07,057 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:07,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:07,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:08,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:08,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:08,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:08,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:08,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0cda5b537c5711031301b341cec171a9 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:45:08,862 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/d3c3c69092bc47a1afd5df515e4ffbcd is 1080, key is row0030/info:/1731969906835/Put/seqid=0 2024-11-18T22:45:08,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741839_1015 (size=12509) 2024-11-18T22:45:08,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741839_1015 (size=12509) 2024-11-18T22:45:08,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0cda5b537c5711031301b341cec171a9, server=3b0ca91ed64a,33617,1731969895393 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-18T22:45:08,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56650 deadline: 1731969918973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0cda5b537c5711031301b341cec171a9, server=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:45:08,997 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., hostname=3b0ca91ed64a,33617,1731969895393, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., hostname=3b0ca91ed64a,33617,1731969895393, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0cda5b537c5711031301b341cec171a9, server=3b0ca91ed64a,33617,1731969895393 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T22:45:08,998 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., hostname=3b0ca91ed64a,33617,1731969895393, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=0cda5b537c5711031301b341cec171a9, server=3b0ca91ed64a,33617,1731969895393 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T22:45:08,998 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., hostname=3b0ca91ed64a,33617,1731969895393, seqNum=2 because the exception is null or not the one we care about 2024-11-18T22:45:09,058 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:09,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:09,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=46 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/d3c3c69092bc47a1afd5df515e4ffbcd 2024-11-18T22:45:09,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/d3c3c69092bc47a1afd5df515e4ffbcd as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/d3c3c69092bc47a1afd5df515e4ffbcd 2024-11-18T22:45:09,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/d3c3c69092bc47a1afd5df515e4ffbcd, entries=7, sequenceid=46, filesize=12.2 K 2024-11-18T22:45:09,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 0cda5b537c5711031301b341cec171a9 in 450ms, sequenceid=46, compaction requested=true 2024-11-18T22:45:09,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0cda5b537c5711031301b341cec171a9: 2024-11-18T22:45:09,305 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=52.4 K, sizeToCheck=16.0 K 2024-11-18T22:45:09,305 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:09,305 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/b64abeba5fce4effbbfe701b1dcd4d5d because midkey is the same as first or last row 2024-11-18T22:45:09,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0cda5b537c5711031301b341cec171a9:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:45:09,306 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:09,306 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:45:09,307 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 53702 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:45:09,307 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1541): 0cda5b537c5711031301b341cec171a9/info is initiating minor compaction (all files) 2024-11-18T22:45:09,308 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0cda5b537c5711031301b341cec171a9/info in 
TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 2024-11-18T22:45:09,308 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/6727dafee4fa4028a06ad6a0311566f2, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/b64abeba5fce4effbbfe701b1dcd4d5d, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/d3c3c69092bc47a1afd5df515e4ffbcd] into tmpdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp, totalSize=52.4 K 2024-11-18T22:45:09,308 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6727dafee4fa4028a06ad6a0311566f2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731969906746 2024-11-18T22:45:09,309 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting b64abeba5fce4effbbfe701b1dcd4d5d, keycount=22, bloomtype=ROW, size=28.0 K, encoding=NONE, compression=NONE, seqNum=36, earliestPutTs=1731969906764 2024-11-18T22:45:09,309 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting d3c3c69092bc47a1afd5df515e4ffbcd, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1731969906835 2024-11-18T22:45:09,329 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0cda5b537c5711031301b341cec171a9#info#compaction#60 average throughput is 18.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:45:09,329 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/3c474b4f043040cebef1959b70f82403 is 1080, key is row0001/info:/1731969906746/Put/seqid=0 2024-11-18T22:45:09,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741840_1016 (size=43901) 2024-11-18T22:45:09,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741840_1016 (size=43901) 2024-11-18T22:45:09,380 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/3c474b4f043040cebef1959b70f82403 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/3c474b4f043040cebef1959b70f82403 2024-11-18T22:45:09,396 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0cda5b537c5711031301b341cec171a9/info of 0cda5b537c5711031301b341cec171a9 into 3c474b4f043040cebef1959b70f82403(size=42.9 K), total size for store is 42.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:45:09,396 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0cda5b537c5711031301b341cec171a9: 2024-11-18T22:45:09,396 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., storeName=0cda5b537c5711031301b341cec171a9/info, priority=13, startTime=1731969909305; duration=0sec 2024-11-18T22:45:09,396 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=42.9 K, sizeToCheck=16.0 K 2024-11-18T22:45:09,396 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:09,397 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/3c474b4f043040cebef1959b70f82403 because midkey is the same as first or last row 2024-11-18T22:45:09,397 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=42.9 K, sizeToCheck=16.0 K 2024-11-18T22:45:09,397 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:09,397 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/3c474b4f043040cebef1959b70f82403 because midkey is the same as first or last row 2024-11-18T22:45:09,397 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=42.9 K, sizeToCheck=16.0 K 2024-11-18T22:45:09,397 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:09,397 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/3c474b4f043040cebef1959b70f82403 because midkey is the same as first or last row 2024-11-18T22:45:09,397 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:09,397 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0cda5b537c5711031301b341cec171a9:info 2024-11-18T22:45:09,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:10,059 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:10,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:10,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:11,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:11,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:11,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:12,060 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:12,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:12,826 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:13,061 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:13,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:13,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:14,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:14,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:14,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:15,062 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:15,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:15,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:16,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:16,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:16,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:17,063 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:17,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:17,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:18,064 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:18,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:18,830 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:18,884 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=1, created chunk count=9, reused chunk count=54, reuseRatio=85.71% 2024-11-18T22:45:18,885 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-18T22:45:19,065 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:19,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:19,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0cda5b537c5711031301b341cec171a9 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-18T22:45:19,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/e91aa1f0c60446bda735a95760c4c7ac is 1080, key is row0037/info:/1731969908857/Put/seqid=0 2024-11-18T22:45:19,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741841_1017 (size=29761) 2024-11-18T22:45:19,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741841_1017 (size=29761) 2024-11-18T22:45:19,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/e91aa1f0c60446bda735a95760c4c7ac 2024-11-18T22:45:19,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/e91aa1f0c60446bda735a95760c4c7ac as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/e91aa1f0c60446bda735a95760c4c7ac 2024-11-18T22:45:19,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/e91aa1f0c60446bda735a95760c4c7ac, entries=23, sequenceid=73, filesize=29.1 K 2024-11-18T22:45:19,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=5.25 KB/5380 for 0cda5b537c5711031301b341cec171a9 in 35ms, sequenceid=73, compaction requested=false 2024-11-18T22:45:19,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0cda5b537c5711031301b341cec171a9: 2024-11-18T22:45:19,130 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=71.9 K, sizeToCheck=16.0 K 2024-11-18T22:45:19,130 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:19,130 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/3c474b4f043040cebef1959b70f82403 because midkey is the same as first or last row 2024-11-18T22:45:19,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:19,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:20,068 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:20,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:20,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:21,069 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:21,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0cda5b537c5711031301b341cec171a9 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:45:21,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/611b3a68823d445c9d872215943d51b3 is 1080, key is row0060/info:/1731969919096/Put/seqid=0 2024-11-18T22:45:21,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741842_1018 (size=12509) 2024-11-18T22:45:21,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741842_1018 (size=12509) 2024-11-18T22:45:21,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=83 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/611b3a68823d445c9d872215943d51b3 2024-11-18T22:45:21,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/611b3a68823d445c9d872215943d51b3 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/611b3a68823d445c9d872215943d51b3 2024-11-18T22:45:21,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/611b3a68823d445c9d872215943d51b3, entries=7, sequenceid=83, filesize=12.2 K 2024-11-18T22:45:21,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 0cda5b537c5711031301b341cec171a9 in 26ms, sequenceid=83, compaction requested=true 2024-11-18T22:45:21,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0cda5b537c5711031301b341cec171a9: 2024-11-18T22:45:21,139 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=84.2 K, sizeToCheck=16.0 K 2024-11-18T22:45:21,139 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:21,139 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/3c474b4f043040cebef1959b70f82403 because midkey is the same as first or last row 2024-11-18T22:45:21,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0cda5b537c5711031301b341cec171a9:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-18T22:45:21,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:21,139 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:45:21,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,140 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0cda5b537c5711031301b341cec171a9 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T22:45:21,140 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 86171 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:45:21,141 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HStore(1541): 0cda5b537c5711031301b341cec171a9/info is initiating minor compaction (all files) 2024-11-18T22:45:21,141 INFO [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0cda5b537c5711031301b341cec171a9/info in TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 2024-11-18T22:45:21,141 INFO [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/3c474b4f043040cebef1959b70f82403, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/e91aa1f0c60446bda735a95760c4c7ac, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/611b3a68823d445c9d872215943d51b3] into tmpdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp, totalSize=84.2 K 2024-11-18T22:45:21,142 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] compactions.Compactor(225): Compacting 3c474b4f043040cebef1959b70f82403, keycount=36, bloomtype=ROW, size=42.9 K, encoding=NONE, compression=NONE, seqNum=46, earliestPutTs=1731969906746 2024-11-18T22:45:21,142 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] compactions.Compactor(225): Compacting e91aa1f0c60446bda735a95760c4c7ac, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1731969908857 2024-11-18T22:45:21,143 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] compactions.Compactor(225): Compacting 611b3a68823d445c9d872215943d51b3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731969919096 2024-11-18T22:45:21,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/79094f4de79948b4a524c5894275f97d is 1080, key is row0067/info:/1731969921113/Put/seqid=0 
2024-11-18T22:45:21,162 INFO [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0cda5b537c5711031301b341cec171a9#info#compaction#64 average throughput is 22.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:45:21,163 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/53b62ab98eed43c68127a2806d421ce6 is 1080, key is row0001/info:/1731969906746/Put/seqid=0 2024-11-18T22:45:21,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741843_1019 (size=16817) 2024-11-18T22:45:21,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741843_1019 (size=16817) 2024-11-18T22:45:21,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/79094f4de79948b4a524c5894275f97d 2024-11-18T22:45:21,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741844_1020 (size=76455) 2024-11-18T22:45:21,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741844_1020 (size=76455) 2024-11-18T22:45:21,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/79094f4de79948b4a524c5894275f97d as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/79094f4de79948b4a524c5894275f97d 2024-11-18T22:45:21,178 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/53b62ab98eed43c68127a2806d421ce6 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/53b62ab98eed43c68127a2806d421ce6 2024-11-18T22:45:21,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/79094f4de79948b4a524c5894275f97d, entries=11, sequenceid=97, filesize=16.4 K 2024-11-18T22:45:21,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=15.76 KB/16140 for 0cda5b537c5711031301b341cec171a9 in 47ms, sequenceid=97, compaction requested=false 2024-11-18T22:45:21,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
0cda5b537c5711031301b341cec171a9: 2024-11-18T22:45:21,187 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=100.6 K, sizeToCheck=16.0 K 2024-11-18T22:45:21,187 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:21,187 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/3c474b4f043040cebef1959b70f82403 because midkey is the same as first or last row 2024-11-18T22:45:21,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0cda5b537c5711031301b341cec171a9 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-18T22:45:21,205 INFO [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0cda5b537c5711031301b341cec171a9/info of 0cda5b537c5711031301b341cec171a9 into 53b62ab98eed43c68127a2806d421ce6(size=74.7 K), total size for store is 91.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:45:21,205 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0cda5b537c5711031301b341cec171a9: 2024-11-18T22:45:21,205 INFO [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., storeName=0cda5b537c5711031301b341cec171a9/info, priority=13, startTime=1731969921139; duration=0sec 2024-11-18T22:45:21,205 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-11-18T22:45:21,205 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:21,205 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-11-18T22:45:21,205 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:21,205 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-11-18T22:45:21,206 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:21,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/fd9322b8b1ad47059e8c03a0b5f36e5f is 1080, key is row0078/info:/1731969921142/Put/seqid=0 2024-11-18T22:45:21,207 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.CompactSplit(239): Splitting 
TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:21,207 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:21,207 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0cda5b537c5711031301b341cec171a9:info 2024-11-18T22:45:21,209 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41377 {}] assignment.AssignmentManager(1355): Split request from 3b0ca91ed64a,33617,1731969895393, parent={ENCODED => 0cda5b537c5711031301b341cec171a9, NAME => 'TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-18T22:45:21,216 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41377 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:45:21,223 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41377 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0cda5b537c5711031301b341cec171a9, daughterA=a1f3f5bbc20befd26afd04901ad35af5, daughterB=9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:21,227 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0cda5b537c5711031301b341cec171a9, daughterA=a1f3f5bbc20befd26afd04901ad35af5, daughterB=9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:21,228 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0cda5b537c5711031301b341cec171a9, daughterA=a1f3f5bbc20befd26afd04901ad35af5, daughterB=9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:21,228 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0cda5b537c5711031301b341cec171a9, daughterA=a1f3f5bbc20befd26afd04901ad35af5, daughterB=9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:21,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741845_1021 (size=23299) 2024-11-18T22:45:21,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741845_1021 (size=23299) 2024-11-18T22:45:21,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/fd9322b8b1ad47059e8c03a0b5f36e5f 2024-11-18T22:45:21,241 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=TestLogRolling-testLogRolling, region=0cda5b537c5711031301b341cec171a9, UNASSIGN}] 2024-11-18T22:45:21,248 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0cda5b537c5711031301b341cec171a9, UNASSIGN 2024-11-18T22:45:21,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/fd9322b8b1ad47059e8c03a0b5f36e5f as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/fd9322b8b1ad47059e8c03a0b5f36e5f 2024-11-18T22:45:21,251 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=0cda5b537c5711031301b341cec171a9, regionState=CLOSING, regionLocation=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:45:21,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0cda5b537c5711031301b341cec171a9, UNASSIGN because future has completed 2024-11-18T22:45:21,256 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-18T22:45:21,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0cda5b537c5711031301b341cec171a9, server=3b0ca91ed64a,33617,1731969895393}] 2024-11-18T22:45:21,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:21,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/fd9322b8b1ad47059e8c03a0b5f36e5f, entries=17, sequenceid=117, filesize=22.8 K 2024-11-18T22:45:21,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=2.10 KB/2152 for 0cda5b537c5711031301b341cec171a9 in 73ms, sequenceid=117, compaction requested=true 2024-11-18T22:45:21,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0cda5b537c5711031301b341cec171a9: 2024-11-18T22:45:21,263 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.8 K, sizeToCheck=16.0 K 2024-11-18T22:45:21,263 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:21,264 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.8 K, sizeToCheck=16.0 K 2024-11-18T22:45:21,264 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:21,264 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=113.8 K, sizeToCheck=16.0 K 2024-11-18T22:45:21,264 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-18T22:45:21,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:21,269 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41377 {}] assignment.AssignmentManager(1355): Split request from 3b0ca91ed64a,33617,1731969895393, parent={ENCODED => 0cda5b537c5711031301b341cec171a9, NAME => 'TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-18T22:45:21,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41377 {}] assignment.AssignmentManager(1370): Ignoring split request from 3b0ca91ed64a,33617,1731969895393, parent={ENCODED => 0cda5b537c5711031301b341cec171a9, NAME => 'TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.', STARTKEY => '', ENDKEY => ''} because parent is unknown or not open 2024-11-18T22:45:21,417 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 0cda5b537c5711031301b341cec171a9 
2024-11-18T22:45:21,417 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-18T22:45:21,418 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 0cda5b537c5711031301b341cec171a9, disabling compactions & flushes 2024-11-18T22:45:21,418 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 2024-11-18T22:45:21,418 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 2024-11-18T22:45:21,418 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. after waiting 0 ms 2024-11-18T22:45:21,418 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 2024-11-18T22:45:21,418 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 0cda5b537c5711031301b341cec171a9 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-18T22:45:21,425 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/56878517aadb4cb98cbf8e812279da11 is 1080, key is row0095/info:/1731969921193/Put/seqid=0 2024-11-18T22:45:21,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741846_1022 (size=7112) 2024-11-18T22:45:21,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741846_1022 (size=7112) 2024-11-18T22:45:21,433 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/56878517aadb4cb98cbf8e812279da11 2024-11-18T22:45:21,440 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/.tmp/info/56878517aadb4cb98cbf8e812279da11 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/56878517aadb4cb98cbf8e812279da11 2024-11-18T22:45:21,449 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 
{event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/56878517aadb4cb98cbf8e812279da11, entries=2, sequenceid=123, filesize=6.9 K 2024-11-18T22:45:21,451 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 0cda5b537c5711031301b341cec171a9 in 32ms, sequenceid=123, compaction requested=true 2024-11-18T22:45:21,451 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/6727dafee4fa4028a06ad6a0311566f2, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/b64abeba5fce4effbbfe701b1dcd4d5d, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/3c474b4f043040cebef1959b70f82403, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/d3c3c69092bc47a1afd5df515e4ffbcd, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/e91aa1f0c60446bda735a95760c4c7ac, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/611b3a68823d445c9d872215943d51b3] to archive 2024-11-18T22:45:21,455 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-18T22:45:21,457 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/6727dafee4fa4028a06ad6a0311566f2 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/6727dafee4fa4028a06ad6a0311566f2 2024-11-18T22:45:21,458 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/b64abeba5fce4effbbfe701b1dcd4d5d to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/b64abeba5fce4effbbfe701b1dcd4d5d 2024-11-18T22:45:21,460 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/3c474b4f043040cebef1959b70f82403 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/3c474b4f043040cebef1959b70f82403 2024-11-18T22:45:21,461 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/d3c3c69092bc47a1afd5df515e4ffbcd to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/d3c3c69092bc47a1afd5df515e4ffbcd 2024-11-18T22:45:21,463 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/e91aa1f0c60446bda735a95760c4c7ac to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/e91aa1f0c60446bda735a95760c4c7ac 2024-11-18T22:45:21,465 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/611b3a68823d445c9d872215943d51b3 to 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/611b3a68823d445c9d872215943d51b3 2024-11-18T22:45:21,478 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-11-18T22:45:21,479 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 2024-11-18T22:45:21,479 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 0cda5b537c5711031301b341cec171a9: Waiting for close lock at 1731969921418Running coprocessor pre-close hooks at 1731969921418Disabling compacts and flushes for region at 1731969921418Disabling writes for close at 1731969921418Obtaining lock to block concurrent updates at 1731969921418Preparing flush snapshotting stores in 0cda5b537c5711031301b341cec171a9 at 1731969921418Finished memstore snapshotting TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., syncing WAL and waiting on mvcc, flushsize=dataSize=2152, getHeapSize=2544, getOffHeapSize=0, getCellsCount=2 at 1731969921418Flushing stores of TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. at 1731969921421 (+3 ms)Flushing 0cda5b537c5711031301b341cec171a9/info: creating writer at 1731969921421Flushing 0cda5b537c5711031301b341cec171a9/info: appending metadata at 1731969921424 (+3 ms)Flushing 0cda5b537c5711031301b341cec171a9/info: closing flushed file at 1731969921424Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49d4fda4: reopening flushed file at 1731969921439 (+15 ms)Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 0cda5b537c5711031301b341cec171a9 in 32ms, sequenceid=123, compaction requested=true at 1731969921451 (+12 ms)Writing region close event to WAL at 1731969921473 (+22 ms)Running coprocessor post-close hooks at 1731969921479 (+6 ms)Closed at 1731969921479 2024-11-18T22:45:21,482 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,483 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=0cda5b537c5711031301b341cec171a9, regionState=CLOSED 2024-11-18T22:45:21,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0cda5b537c5711031301b341cec171a9, server=3b0ca91ed64a,33617,1731969895393 because future has completed 2024-11-18T22:45:21,489 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-18T22:45:21,489 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 0cda5b537c5711031301b341cec171a9, server=3b0ca91ed64a,33617,1731969895393 in 230 msec 2024-11-18T22:45:21,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=8, resume processing ppid=7 2024-11-18T22:45:21,492 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0cda5b537c5711031301b341cec171a9, UNASSIGN in 248 msec 2024-11-18T22:45:21,537 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:21,558 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=0cda5b537c5711031301b341cec171a9, threads=4 2024-11-18T22:45:21,562 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/53b62ab98eed43c68127a2806d421ce6 for region: 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,564 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/56878517aadb4cb98cbf8e812279da11 for region: 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,565 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/79094f4de79948b4a524c5894275f97d for region: 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,568 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/fd9322b8b1ad47059e8c03a0b5f36e5f for region: 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,587 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/79094f4de79948b4a524c5894275f97d, top=true 2024-11-18T22:45:21,590 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/56878517aadb4cb98cbf8e812279da11, top=true 2024-11-18T22:45:21,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741847_1023 (size=27) 2024-11-18T22:45:21,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741847_1023 (size=27) 2024-11-18T22:45:21,599 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created 
linkFile:hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-79094f4de79948b4a524c5894275f97d for child: 9fcd2d84f49ff088c7131b4ceaefae7a, parent: 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,599 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/79094f4de79948b4a524c5894275f97d for region: 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,600 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-56878517aadb4cb98cbf8e812279da11 for child: 9fcd2d84f49ff088c7131b4ceaefae7a, parent: 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,601 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/56878517aadb4cb98cbf8e812279da11 for region: 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,602 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/fd9322b8b1ad47059e8c03a0b5f36e5f, top=true 2024-11-18T22:45:21,611 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-fd9322b8b1ad47059e8c03a0b5f36e5f for child: 9fcd2d84f49ff088c7131b4ceaefae7a, parent: 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,611 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/fd9322b8b1ad47059e8c03a0b5f36e5f for region: 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741848_1024 (size=27) 2024-11-18T22:45:21,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741848_1024 (size=27) 2024-11-18T22:45:21,615 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/53b62ab98eed43c68127a2806d421ce6 for region: 0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:21,618 DEBUG [PEWorker-5 {}] 
assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 0cda5b537c5711031301b341cec171a9 Daughter A: [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9] storefiles, Daughter B: [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-56878517aadb4cb98cbf8e812279da11, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-79094f4de79948b4a524c5894275f97d, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-fd9322b8b1ad47059e8c03a0b5f36e5f] storefiles. 2024-11-18T22:45:21,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741849_1025 (size=71) 2024-11-18T22:45:21,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741849_1025 (size=71) 2024-11-18T22:45:21,643 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:21,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741850_1026 (size=71) 2024-11-18T22:45:21,672 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:21,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741850_1026 (size=71) 2024-11-18T22:45:21,694 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-18T22:45:21,698 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-11-18T22:45:21,701 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731969921700"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731969921700"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731969921700"}]},"ts":"1731969921700"} 
2024-11-18T22:45:21,701 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731969921700"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731969921700"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731969921700"}]},"ts":"1731969921700"} 2024-11-18T22:45:21,701 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731969921700"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731969921700"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731969921700"}]},"ts":"1731969921700"} 2024-11-18T22:45:21,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a1f3f5bbc20befd26afd04901ad35af5, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9fcd2d84f49ff088c7131b4ceaefae7a, ASSIGN}] 2024-11-18T22:45:21,732 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9fcd2d84f49ff088c7131b4ceaefae7a, ASSIGN 2024-11-18T22:45:21,732 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a1f3f5bbc20befd26afd04901ad35af5, ASSIGN 2024-11-18T22:45:21,734 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9fcd2d84f49ff088c7131b4ceaefae7a, ASSIGN; state=SPLITTING_NEW, location=3b0ca91ed64a,33617,1731969895393; forceNewPlan=false, retain=false 2024-11-18T22:45:21,734 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a1f3f5bbc20befd26afd04901ad35af5, ASSIGN; state=SPLITTING_NEW, location=3b0ca91ed64a,33617,1731969895393; forceNewPlan=false, retain=false 2024-11-18T22:45:21,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:21,884 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=a1f3f5bbc20befd26afd04901ad35af5, regionState=OPENING, regionLocation=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:45:21,884 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=9fcd2d84f49ff088c7131b4ceaefae7a, regionState=OPENING, regionLocation=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:45:21,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a1f3f5bbc20befd26afd04901ad35af5, ASSIGN because future has completed 2024-11-18T22:45:21,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure a1f3f5bbc20befd26afd04901ad35af5, server=3b0ca91ed64a,33617,1731969895393}] 2024-11-18T22:45:21,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9fcd2d84f49ff088c7131b4ceaefae7a, ASSIGN because future has completed 2024-11-18T22:45:21,889 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9fcd2d84f49ff088c7131b4ceaefae7a, server=3b0ca91ed64a,33617,1731969895393}] 2024-11-18T22:45:22,051 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open 
TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5. 2024-11-18T22:45:22,051 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => a1f3f5bbc20befd26afd04901ad35af5, NAME => 'TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-18T22:45:22,052 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:22,052 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:45:22,052 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:22,052 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:22,058 INFO [StoreOpener-a1f3f5bbc20befd26afd04901ad35af5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:22,059 INFO [StoreOpener-a1f3f5bbc20befd26afd04901ad35af5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a1f3f5bbc20befd26afd04901ad35af5 columnFamilyName info 2024-11-18T22:45:22,059 DEBUG [StoreOpener-a1f3f5bbc20befd26afd04901ad35af5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:22,072 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:22,127 DEBUG [StoreOpener-a1f3f5bbc20befd26afd04901ad35af5-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9->hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/53b62ab98eed43c68127a2806d421ce6-bottom 2024-11-18T22:45:22,128 INFO [StoreOpener-a1f3f5bbc20befd26afd04901ad35af5-1 {}] regionserver.HStore(327): Store=a1f3f5bbc20befd26afd04901ad35af5/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:45:22,129 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:22,132 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:22,135 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:22,135 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] 
regionserver.HRegion(1048): stopping wal replay for a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:22,135 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:22,138 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:22,140 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened a1f3f5bbc20befd26afd04901ad35af5; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859634, jitterRate=0.09308184683322906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T22:45:22,140 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:22,141 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for a1f3f5bbc20befd26afd04901ad35af5: Running coprocessor pre-open hook at 1731969922052Writing region info on filesystem at 1731969922052Initializing all the Stores at 1731969922056 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969922056Cleaning up temporary data from old regions at 1731969922135 (+79 ms)Running coprocessor post-open hooks at 1731969922140 (+5 ms)Region opened successfully at 1731969922141 (+1 ms) 2024-11-18T22:45:22,142 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5., pid=12, masterSystemTime=1731969922042 2024-11-18T22:45:22,143 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store a1f3f5bbc20befd26afd04901ad35af5:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:45:22,143 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-18T22:45:22,143 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-18T22:45:22,144 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5. 
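Editor's note: the recurring Close-WAL-Writer warnings in this log come from RecoverLeaseFSUtils probing isFileClosed through a DFSClient that has already been shut down (the "Filesystem closed" cause in each trace). The sketch below shows the underlying HDFS lease-recovery calls issued on a fresh client instance; the NameNode URI matches the one in the warnings, but the WAL path and the polling interval are illustrative assumptions.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Use a fresh, uncached client; the warnings above occur because the shared
    // DFSClient behind the cached FileSystem instance was already closed.
    try (FileSystem fs = FileSystem.newInstance(URI.create("hdfs://localhost:42521"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path wal = new Path("/user/jenkins/some-wal-file");   // illustrative path, not from the log
      boolean recovered = dfs.recoverLease(wal);            // ask the NameNode to start lease recovery
      while (!recovered && !dfs.isFileClosed(wal)) {        // poll until the file is marked closed
        Thread.sleep(1000L);
        recovered = dfs.recoverLease(wal);
      }
      System.out.println("lease recovered / file closed for " + wal);
    }
  }
}
```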
2024-11-18T22:45:22,144 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1541): a1f3f5bbc20befd26afd04901ad35af5/info is initiating minor compaction (all files) 2024-11-18T22:45:22,144 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a1f3f5bbc20befd26afd04901ad35af5/info in TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5. 2024-11-18T22:45:22,145 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9->hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/53b62ab98eed43c68127a2806d421ce6-bottom] into tmpdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/.tmp, totalSize=74.7 K 2024-11-18T22:45:22,145 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9, keycount=33, bloomtype=ROW, size=74.7 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1731969906746 2024-11-18T22:45:22,149 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5. 2024-11-18T22:45:22,149 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5. 2024-11-18T22:45:22,149 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 
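Editor's note: both daughters are queued for compaction as soon as they open ("Because: Opening Region"), which is how the parent reference files get rewritten into region-local HFiles. Outside this automatic path, an equivalent request can be made through the Admin API; a sketch under the same assumed connection setup as the split example above:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionRequestSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.majorCompact(table);   // queue a major compaction for every region of the table
      // Compactions run asynchronously; poll the aggregate state until the queue drains.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500L);
      }
      System.out.println("compaction finished for " + table);
    }
  }
}
```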
2024-11-18T22:45:22,149 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 9fcd2d84f49ff088c7131b4ceaefae7a, NAME => 'TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-18T22:45:22,150 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:22,150 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:45:22,150 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:22,150 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:22,156 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=a1f3f5bbc20befd26afd04901ad35af5, regionState=OPEN, openSeqNum=127, regionLocation=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:45:22,162 INFO [StoreOpener-9fcd2d84f49ff088c7131b4ceaefae7a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:22,166 INFO [StoreOpener-9fcd2d84f49ff088c7131b4ceaefae7a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9fcd2d84f49ff088c7131b4ceaefae7a columnFamilyName info 2024-11-18T22:45:22,166 DEBUG [StoreOpener-9fcd2d84f49ff088c7131b4ceaefae7a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:22,173 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-18T22:45:22,176 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
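Editor's note: the MemStoreFlusher entries that follow flush all four column families of hbase:meta (region 1588230740) after the split metadata is written. A flush of the same table could also be requested explicitly; a minimal sketch, again assuming a working client connection:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush the catalog table; each store writes its memstore to a new HFile under .tmp
      // and then commits it, as the DefaultStoreFlusher / HStore lines in the log show.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}
```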
2024-11-18T22:45:22,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-18T22:45:22,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure a1f3f5bbc20befd26afd04901ad35af5, server=3b0ca91ed64a,33617,1731969895393 because future has completed 2024-11-18T22:45:22,183 DEBUG [StoreOpener-9fcd2d84f49ff088c7131b4ceaefae7a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9->hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/53b62ab98eed43c68127a2806d421ce6-top 2024-11-18T22:45:22,199 DEBUG [StoreOpener-9fcd2d84f49ff088c7131b4ceaefae7a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-56878517aadb4cb98cbf8e812279da11 2024-11-18T22:45:22,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-18T22:45:22,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure a1f3f5bbc20befd26afd04901ad35af5, server=3b0ca91ed64a,33617,1731969895393 in 304 msec 2024-11-18T22:45:22,213 DEBUG [StoreOpener-9fcd2d84f49ff088c7131b4ceaefae7a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-79094f4de79948b4a524c5894275f97d 2024-11-18T22:45:22,217 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a1f3f5bbc20befd26afd04901ad35af5#info#compaction#67 average throughput is 3.68 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:45:22,217 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/.tmp/info/cd91c555daac459381673e9c7a03f84a is 1080, key is row0001/info:/1731969906746/Put/seqid=0 2024-11-18T22:45:22,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=a1f3f5bbc20befd26afd04901ad35af5, ASSIGN in 489 msec 2024-11-18T22:45:22,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/info/4707685cf95e47d2ad2bbe1389ab534f is 193, key is TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a./info:regioninfo/1731969921884/Put/seqid=0 2024-11-18T22:45:22,246 DEBUG [StoreOpener-9fcd2d84f49ff088c7131b4ceaefae7a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-fd9322b8b1ad47059e8c03a0b5f36e5f 2024-11-18T22:45:22,246 INFO [StoreOpener-9fcd2d84f49ff088c7131b4ceaefae7a-1 {}] regionserver.HStore(327): Store=9fcd2d84f49ff088c7131b4ceaefae7a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:45:22,246 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:22,247 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:22,249 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:22,250 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:22,250 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:22,254 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:22,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741851_1027 (size=70862) 2024-11-18T22:45:22,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39621 is added to blk_1073741851_1027 (size=70862) 2024-11-18T22:45:22,256 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 9fcd2d84f49ff088c7131b4ceaefae7a; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=803502, jitterRate=0.02170625329017639}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-18T22:45:22,257 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:22,257 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 9fcd2d84f49ff088c7131b4ceaefae7a: Running coprocessor pre-open hook at 1731969922150Writing region info on filesystem at 1731969922150Initializing all the Stores at 1731969922159 (+9 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969922159Cleaning up temporary data from old regions at 1731969922250 (+91 ms)Running coprocessor post-open hooks at 1731969922257 (+7 ms)Region opened successfully at 1731969922257 2024-11-18T22:45:22,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:22,259 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a., pid=13, masterSystemTime=1731969922042 2024-11-18T22:45:22,260 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 9fcd2d84f49ff088c7131b4ceaefae7a:info, priority=-2147483648, current under compaction store size is 2 2024-11-18T22:45:22,260 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:22,260 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-18T22:45:22,263 DEBUG [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 2024-11-18T22:45:22,263 INFO [RS_OPEN_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 2024-11-18T22:45:22,264 INFO [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 2024-11-18T22:45:22,264 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HStore(1541): 9fcd2d84f49ff088c7131b4ceaefae7a/info is initiating minor compaction (all files) 2024-11-18T22:45:22,264 INFO [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9fcd2d84f49ff088c7131b4ceaefae7a/info in TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 
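Editor's note: the four input files chosen for 9fcd2d84f49ff088c7131b4ceaefae7a/info total 120.8 K, matching the per-file sizes in the Compactor(225) lines nearby (a 74.7 K parent reference file plus 16.4 K, 22.8 K and 6.9 K flushed files). The sketch below only checks that sum and the standard size-ratio heuristic at ratio 1.2 (from the CompactionConfiguration entries); it is an illustration of the arithmetic, not the policy itself, since the daughter-region selection above deliberately compacts all files regardless of the ratio test.

```java
// Sizes in KB copied from the Compactor(225) lines; ratio from CompactionConfiguration(183).
public class CompactionSelectionSketch {
  public static void main(String[] args) {
    double ratio = 1.2;
    double[] kb = { 74.7, 16.4, 22.8, 6.9 };   // reference file + three flushed files

    double total = 0;
    for (double size : kb) {
      total += size;
    }
    System.out.printf("total input size: %.1f K%n", total);   // ~120.8 K, as logged

    // Size-ratio heuristic: a file is a "normal" minor-compaction candidate only if it
    // is not much larger than the combined size of the other candidates.
    double others = total - kb[0];
    boolean passesRatio = kb[0] <= others * ratio;
    System.out.println("74.7 K file passes ratio test: " + passesRatio);   // false
  }
}
```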
2024-11-18T22:45:22,264 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=9fcd2d84f49ff088c7131b4ceaefae7a, regionState=OPEN, openSeqNum=127, regionLocation=3b0ca91ed64a,33617,1731969895393 2024-11-18T22:45:22,265 INFO [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9->hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/53b62ab98eed43c68127a2806d421ce6-top, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-79094f4de79948b4a524c5894275f97d, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-fd9322b8b1ad47059e8c03a0b5f36e5f, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-56878517aadb4cb98cbf8e812279da11] into tmpdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp, totalSize=120.8 K 2024-11-18T22:45:22,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9fcd2d84f49ff088c7131b4ceaefae7a, server=3b0ca91ed64a,33617,1731969895393 because future has completed 2024-11-18T22:45:22,269 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] compactions.Compactor(225): Compacting 53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9, keycount=33, bloomtype=ROW, size=74.7 K, encoding=NONE, compression=NONE, seqNum=84, earliestPutTs=1731969906746 2024-11-18T22:45:22,270 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-79094f4de79948b4a524c5894275f97d, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=97, earliestPutTs=1731969921113 2024-11-18T22:45:22,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741852_1028 (size=9847) 2024-11-18T22:45:22,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741852_1028 (size=9847) 2024-11-18T22:45:22,271 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-fd9322b8b1ad47059e8c03a0b5f36e5f, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731969921142 2024-11-18T22:45:22,285 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-18T22:45:22,285 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 9fcd2d84f49ff088c7131b4ceaefae7a, server=3b0ca91ed64a,33617,1731969895393 in 391 msec 2024-11-18T22:45:22,287 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-56878517aadb4cb98cbf8e812279da11, keycount=2, bloomtype=ROW, size=6.9 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731969921193 2024-11-18T22:45:22,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/info/4707685cf95e47d2ad2bbe1389ab534f 2024-11-18T22:45:22,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-18T22:45:22,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9fcd2d84f49ff088c7131b4ceaefae7a, ASSIGN in 561 msec 2024-11-18T22:45:22,292 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=0cda5b537c5711031301b341cec171a9, daughterA=a1f3f5bbc20befd26afd04901ad35af5, daughterB=9fcd2d84f49ff088c7131b4ceaefae7a in 1.0740 sec 2024-11-18T22:45:22,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/ns/0145900a3d6142869f18b936fcae2349 is 43, key is default/ns:d/1731969896541/Put/seqid=0 2024-11-18T22:45:22,376 INFO [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9fcd2d84f49ff088c7131b4ceaefae7a#info#compaction#70 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:45:22,377 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/b19a2131fd1849399033e76083bac5d0 is 1080, key is row0062/info:/1731969919101/Put/seqid=0 2024-11-18T22:45:22,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741853_1029 (size=5153) 2024-11-18T22:45:22,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741853_1029 (size=5153) 2024-11-18T22:45:22,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/ns/0145900a3d6142869f18b936fcae2349 2024-11-18T22:45:22,416 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/table/f621aab849f44880b47d0066296a8363 is 65, key is TestLogRolling-testLogRolling/table:state/1731969897034/Put/seqid=0 2024-11-18T22:45:22,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741854_1030 (size=43081) 2024-11-18T22:45:22,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741854_1030 (size=43081) 2024-11-18T22:45:22,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741855_1031 (size=5340) 2024-11-18T22:45:22,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741855_1031 (size=5340) 2024-11-18T22:45:22,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/table/f621aab849f44880b47d0066296a8363 2024-11-18T22:45:22,472 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/b19a2131fd1849399033e76083bac5d0 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/b19a2131fd1849399033e76083bac5d0 2024-11-18T22:45:22,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/info/4707685cf95e47d2ad2bbe1389ab534f as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/info/4707685cf95e47d2ad2bbe1389ab534f 2024-11-18T22:45:22,495 INFO [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) 
file(s) in 9fcd2d84f49ff088c7131b4ceaefae7a/info of 9fcd2d84f49ff088c7131b4ceaefae7a into b19a2131fd1849399033e76083bac5d0(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:45:22,495 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:22,496 INFO [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a., storeName=9fcd2d84f49ff088c7131b4ceaefae7a/info, priority=12, startTime=1731969922260; duration=0sec 2024-11-18T22:45:22,496 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:22,496 DEBUG [RS:0;3b0ca91ed64a:33617-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9fcd2d84f49ff088c7131b4ceaefae7a:info 2024-11-18T22:45:22,503 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/info/4707685cf95e47d2ad2bbe1389ab534f, entries=30, sequenceid=17, filesize=9.6 K 2024-11-18T22:45:22,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/ns/0145900a3d6142869f18b936fcae2349 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/ns/0145900a3d6142869f18b936fcae2349 2024-11-18T22:45:22,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/ns/0145900a3d6142869f18b936fcae2349, entries=2, sequenceid=17, filesize=5.0 K 2024-11-18T22:45:22,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/table/f621aab849f44880b47d0066296a8363 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/table/f621aab849f44880b47d0066296a8363 2024-11-18T22:45:22,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/table/f621aab849f44880b47d0066296a8363, entries=2, sequenceid=17, filesize=5.2 K 2024-11-18T22:45:22,559 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 382ms, sequenceid=17, compaction requested=false 2024-11-18T22:45:22,559 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T22:45:22,668 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/.tmp/info/cd91c555daac459381673e9c7a03f84a as 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/info/cd91c555daac459381673e9c7a03f84a 2024-11-18T22:45:22,693 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in a1f3f5bbc20befd26afd04901ad35af5/info of a1f3f5bbc20befd26afd04901ad35af5 into cd91c555daac459381673e9c7a03f84a(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:45:22,693 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a1f3f5bbc20befd26afd04901ad35af5: 2024-11-18T22:45:22,693 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5., storeName=a1f3f5bbc20befd26afd04901ad35af5/info, priority=15, startTime=1731969922143; duration=0sec 2024-11-18T22:45:22,694 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:22,694 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a1f3f5bbc20befd26afd04901ad35af5:info 2024-11-18T22:45:22,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:23,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:23,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:56650 deadline: 1731969933207, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. 
is not online on 3b0ca91ed64a,33617,1731969895393 2024-11-18T22:45:23,209 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., hostname=3b0ca91ed64a,33617,1731969895393, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., hostname=3b0ca91ed64a,33617,1731969895393, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. is not online on 3b0ca91ed64a,33617,1731969895393 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T22:45:23,209 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., hostname=3b0ca91ed64a,33617,1731969895393, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9. is not online on 3b0ca91ed64a,33617,1731969895393 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-18T22:45:23,209 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731969896633.0cda5b537c5711031301b341cec171a9., hostname=3b0ca91ed64a,33617,1731969895393, seqNum=2 from cache 2024-11-18T22:45:23,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:23,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:24,073 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:24,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:24,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:25,074 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:25,195 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-18T22:45:25,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:25,836 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:25,974 INFO [FSHLog-0-hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData-prefix:3b0ca91ed64a,41377,1731969895226 {}] wal.AbstractFSWAL(1368): Slow sync cost: 223 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39621,DS-51de98e0-fcb4-424f-8d02-fa3101330b24,DISK], DatanodeInfoWithStorage[127.0.0.1:43435,DS-d4915a39-14bb-46ba-a78d-7c003f4de1a5,DISK]] 2024-11-18T22:45:26,075 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:26,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:26,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,480 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,481 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,481 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,481 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,541 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,542 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,547 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,548 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,548 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:26,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:27,059 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:45:27,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,061 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,062 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,076 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:27,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,103 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,104 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,110 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:27,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:27,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:28,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:28,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:28,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:29,077 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:29,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:29,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:30,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:30,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:30,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:31,078 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:31,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:31,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:32,079 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:32,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:32,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:33,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:33,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:33,270 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a., hostname=3b0ca91ed64a,33617,1731969895393, seqNum=127] 2024-11-18T22:45:33,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:33,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:45:33,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e6a7c8aacfc0491882710f9e553c570d is 1080, key is row0097/info:/1731969933272/Put/seqid=0 2024-11-18T22:45:33,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741856_1032 (size=12516) 2024-11-18T22:45:33,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741856_1032 (size=12516) 2024-11-18T22:45:33,313 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e6a7c8aacfc0491882710f9e553c570d 2024-11-18T22:45:33,326 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e6a7c8aacfc0491882710f9e553c570d as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e6a7c8aacfc0491882710f9e553c570d 2024-11-18T22:45:33,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e6a7c8aacfc0491882710f9e553c570d, entries=7, sequenceid=137, filesize=12.2 K 2024-11-18T22:45:33,336 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 9fcd2d84f49ff088c7131b4ceaefae7a in 47ms, sequenceid=137, compaction requested=false 2024-11-18T22:45:33,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:33,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:33,339 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-18T22:45:33,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/f90da1a69e2e494c95e2662b37c58fbc is 1080, key is row0104/info:/1731969933291/Put/seqid=0 2024-11-18T22:45:33,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741857_1033 (size=19000) 2024-11-18T22:45:33,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741857_1033 (size=19000) 2024-11-18T22:45:33,349 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/f90da1a69e2e494c95e2662b37c58fbc 2024-11-18T22:45:33,358 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/f90da1a69e2e494c95e2662b37c58fbc as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/f90da1a69e2e494c95e2662b37c58fbc 2024-11-18T22:45:33,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/f90da1a69e2e494c95e2662b37c58fbc, entries=13, sequenceid=153, filesize=18.6 K 2024-11-18T22:45:33,365 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=7.36 KB/7532 for 9fcd2d84f49ff088c7131b4ceaefae7a in 26ms, sequenceid=153, compaction requested=true 2024-11-18T22:45:33,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:33,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9fcd2d84f49ff088c7131b4ceaefae7a:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:45:33,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:33,365 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:45:33,366 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74597 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:45:33,366 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1541): 9fcd2d84f49ff088c7131b4ceaefae7a/info is initiating minor compaction (all files) 2024-11-18T22:45:33,366 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9fcd2d84f49ff088c7131b4ceaefae7a/info in TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 2024-11-18T22:45:33,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:33,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-18T22:45:33,367 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/b19a2131fd1849399033e76083bac5d0, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e6a7c8aacfc0491882710f9e553c570d, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/f90da1a69e2e494c95e2662b37c58fbc] into tmpdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp, totalSize=72.8 K 2024-11-18T22:45:33,367 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting b19a2131fd1849399033e76083bac5d0, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1731969919101 2024-11-18T22:45:33,367 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting e6a7c8aacfc0491882710f9e553c570d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731969933272 2024-11-18T22:45:33,368 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] 
compactions.Compactor(225): Compacting f90da1a69e2e494c95e2662b37c58fbc, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1731969933291 2024-11-18T22:45:33,371 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/bf4573cec3434ee0bfcef941e92738f1 is 1080, key is row0117/info:/1731969933341/Put/seqid=0 2024-11-18T22:45:33,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741858_1034 (size=13594) 2024-11-18T22:45:33,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741858_1034 (size=13594) 2024-11-18T22:45:33,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/bf4573cec3434ee0bfcef941e92738f1 2024-11-18T22:45:33,393 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9fcd2d84f49ff088c7131b4ceaefae7a#info#compaction#75 average throughput is 28.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:45:33,394 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/def92ea13cd6403097bc9090be8b7619 is 1080, key is row0062/info:/1731969919101/Put/seqid=0 2024-11-18T22:45:33,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/bf4573cec3434ee0bfcef941e92738f1 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/bf4573cec3434ee0bfcef941e92738f1 2024-11-18T22:45:33,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741859_1035 (size=64811) 2024-11-18T22:45:33,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741859_1035 (size=64811) 2024-11-18T22:45:33,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/bf4573cec3434ee0bfcef941e92738f1, entries=8, sequenceid=164, filesize=13.3 K 2024-11-18T22:45:33,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=4.20 KB/4304 for 9fcd2d84f49ff088c7131b4ceaefae7a in 39ms, sequenceid=164, compaction requested=false 2024-11-18T22:45:33,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal 
for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:33,411 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/def92ea13cd6403097bc9090be8b7619 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/def92ea13cd6403097bc9090be8b7619 2024-11-18T22:45:33,418 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9fcd2d84f49ff088c7131b4ceaefae7a/info of 9fcd2d84f49ff088c7131b4ceaefae7a into def92ea13cd6403097bc9090be8b7619(size=63.3 K), total size for store is 76.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:45:33,418 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:33,418 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a., storeName=9fcd2d84f49ff088c7131b4ceaefae7a/info, priority=13, startTime=1731969933365; duration=0sec 2024-11-18T22:45:33,418 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:33,418 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9fcd2d84f49ff088c7131b4ceaefae7a:info 2024-11-18T22:45:33,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:34,080 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:34,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:34,842 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:35,081 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:35,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:35,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:35,386 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:45:35,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/7cfa9a7574a741969b2edd285a18d5a5 is 1080, key is row0125/info:/1731969933368/Put/seqid=0 2024-11-18T22:45:35,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741860_1036 (size=12516) 2024-11-18T22:45:35,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741860_1036 (size=12516) 2024-11-18T22:45:35,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/7cfa9a7574a741969b2edd285a18d5a5 2024-11-18T22:45:35,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/7cfa9a7574a741969b2edd285a18d5a5 as 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/7cfa9a7574a741969b2edd285a18d5a5 2024-11-18T22:45:35,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/7cfa9a7574a741969b2edd285a18d5a5, entries=7, sequenceid=175, filesize=12.2 K 2024-11-18T22:45:35,410 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 9fcd2d84f49ff088c7131b4ceaefae7a in 24ms, sequenceid=175, compaction requested=true 2024-11-18T22:45:35,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:35,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9fcd2d84f49ff088c7131b4ceaefae7a:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:45:35,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:35,410 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:45:35,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:35,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T22:45:35,411 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 90921 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:45:35,411 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1541): 9fcd2d84f49ff088c7131b4ceaefae7a/info is initiating minor compaction (all files) 2024-11-18T22:45:35,412 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9fcd2d84f49ff088c7131b4ceaefae7a/info in TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 
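For orientation, the records above trace the write path this test exercises: puts accumulate in the memstore, a flush writes them out as a new HFile under .tmp and commits it into the info store, and once three store files are eligible the region server schedules a minor compaction on its own. Below is a hedged, client-side sketch of forcing such a flush through the public HBase Admin API; the table name and column family are taken from the log, while the connection setup, row keys, and values are illustrative assumptions and not part of this test run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: write a few rows and request a memstore flush, the kind
// of activity that produces the "Flush requested" / "Flushed memstore data size"
// / "Added ... entries=..., sequenceid=..." records above. Cluster connection
// details, row keys, and values are assumptions for the example.
public final class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    TableName tn = TableName.valueOf("TestLogRolling-testLogRolling");
    byte[] family = Bytes.toBytes("info");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      for (int i = 0; i < 10; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        table.put(put);
      }
      // Forces the memstore to be written out as a new store file; repeated
      // flushes make the store eligible for a minor compaction, which the
      // region server selects and runs itself, as the surrounding log shows.
      admin.flush(tn);
    }
  }
}
```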
2024-11-18T22:45:35,412 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/def92ea13cd6403097bc9090be8b7619, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/bf4573cec3434ee0bfcef941e92738f1, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/7cfa9a7574a741969b2edd285a18d5a5] into tmpdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp, totalSize=88.8 K 2024-11-18T22:45:35,412 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting def92ea13cd6403097bc9090be8b7619, keycount=55, bloomtype=ROW, size=63.3 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1731969919101 2024-11-18T22:45:35,413 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting bf4573cec3434ee0bfcef941e92738f1, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1731969933341 2024-11-18T22:45:35,413 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7cfa9a7574a741969b2edd285a18d5a5, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1731969933368 2024-11-18T22:45:35,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/9b37d828fa904e57a1d6e7f96b70e066 is 1080, key is row0132/info:/1731969935388/Put/seqid=0 2024-11-18T22:45:35,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741861_1037 (size=16828) 2024-11-18T22:45:35,434 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9fcd2d84f49ff088c7131b4ceaefae7a#info#compaction#78 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:45:35,435 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/1a39de087a3e4676ba2898f0864fe503 is 1080, key is row0062/info:/1731969919101/Put/seqid=0 2024-11-18T22:45:35,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741861_1037 (size=16828) 2024-11-18T22:45:35,437 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/9b37d828fa904e57a1d6e7f96b70e066 2024-11-18T22:45:35,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/9b37d828fa904e57a1d6e7f96b70e066 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/9b37d828fa904e57a1d6e7f96b70e066 2024-11-18T22:45:35,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/9b37d828fa904e57a1d6e7f96b70e066, entries=11, sequenceid=189, filesize=16.4 K 2024-11-18T22:45:35,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=15.76 KB/16140 for 9fcd2d84f49ff088c7131b4ceaefae7a in 43ms, sequenceid=189, compaction requested=false 2024-11-18T22:45:35,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:35,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:35,457 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-18T22:45:35,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741862_1038 (size=81156) 2024-11-18T22:45:35,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741862_1038 (size=81156) 2024-11-18T22:45:35,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/d11dd6605b0d48e3a13025ea1379b32a is 1080, key is row0143/info:/1731969935412/Put/seqid=0 2024-11-18T22:45:35,472 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/1a39de087a3e4676ba2898f0864fe503 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/1a39de087a3e4676ba2898f0864fe503 2024-11-18T22:45:35,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741863_1039 (size=22238) 2024-11-18T22:45:35,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741863_1039 (size=22238) 2024-11-18T22:45:35,481 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9fcd2d84f49ff088c7131b4ceaefae7a/info of 9fcd2d84f49ff088c7131b4ceaefae7a into 1a39de087a3e4676ba2898f0864fe503(size=79.3 K), total size for store is 95.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:45:35,481 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:35,481 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a., storeName=9fcd2d84f49ff088c7131b4ceaefae7a/info, priority=13, startTime=1731969935410; duration=0sec 2024-11-18T22:45:35,481 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:35,481 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9fcd2d84f49ff088c7131b4ceaefae7a:info 2024-11-18T22:45:35,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:35,875 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/d11dd6605b0d48e3a13025ea1379b32a 2024-11-18T22:45:35,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/d11dd6605b0d48e3a13025ea1379b32a as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/d11dd6605b0d48e3a13025ea1379b32a 2024-11-18T22:45:35,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/d11dd6605b0d48e3a13025ea1379b32a, entries=16, sequenceid=208, filesize=21.7 K 2024-11-18T22:45:35,893 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=2.10 KB/2152 for 9fcd2d84f49ff088c7131b4ceaefae7a in 437ms, sequenceid=208, compaction requested=true 2024-11-18T22:45:35,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:35,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9fcd2d84f49ff088c7131b4ceaefae7a:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:45:35,893 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:35,893 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:45:35,894 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 120222 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:45:35,894 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1541): 9fcd2d84f49ff088c7131b4ceaefae7a/info is initiating minor compaction (all files) 2024-11-18T22:45:35,894 INFO 
[RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9fcd2d84f49ff088c7131b4ceaefae7a/info in TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 2024-11-18T22:45:35,895 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/1a39de087a3e4676ba2898f0864fe503, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/9b37d828fa904e57a1d6e7f96b70e066, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/d11dd6605b0d48e3a13025ea1379b32a] into tmpdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp, totalSize=117.4 K 2024-11-18T22:45:35,895 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1a39de087a3e4676ba2898f0864fe503, keycount=70, bloomtype=ROW, size=79.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1731969919101 2024-11-18T22:45:35,896 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9b37d828fa904e57a1d6e7f96b70e066, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731969935388 2024-11-18T22:45:35,896 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting d11dd6605b0d48e3a13025ea1379b32a, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1731969935412 2024-11-18T22:45:35,910 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9fcd2d84f49ff088c7131b4ceaefae7a#info#compaction#80 average throughput is 33.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:45:35,910 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/dc41a7ca404c4342b25a5df247ab9c90 is 1080, key is row0062/info:/1731969919101/Put/seqid=0 2024-11-18T22:45:35,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741864_1040 (size=110372) 2024-11-18T22:45:35,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741864_1040 (size=110372) 2024-11-18T22:45:35,946 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/dc41a7ca404c4342b25a5df247ab9c90 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/dc41a7ca404c4342b25a5df247ab9c90 2024-11-18T22:45:35,960 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9fcd2d84f49ff088c7131b4ceaefae7a/info of 9fcd2d84f49ff088c7131b4ceaefae7a into dc41a7ca404c4342b25a5df247ab9c90(size=107.8 K), total size for store is 107.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:45:35,960 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:35,960 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a., storeName=9fcd2d84f49ff088c7131b4ceaefae7a/info, priority=13, startTime=1731969935893; duration=0sec 2024-11-18T22:45:35,960 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:35,960 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9fcd2d84f49ff088c7131b4ceaefae7a:info 2024-11-18T22:45:36,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:36,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:36,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:37,082 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:37,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:37,270 INFO [master/3b0ca91ed64a:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-18T22:45:37,270 INFO [master/3b0ca91ed64a:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-18T22:45:37,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:37,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:45:37,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/3c73b8ab3b4342629d4074f639bfa066 is 1080, key is row0159/info:/1731969935458/Put/seqid=0 2024-11-18T22:45:37,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741865_1041 (size=12516) 2024-11-18T22:45:37,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741865_1041 (size=12516) 2024-11-18T22:45:37,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/3c73b8ab3b4342629d4074f639bfa066 2024-11-18T22:45:37,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/3c73b8ab3b4342629d4074f639bfa066 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/3c73b8ab3b4342629d4074f639bfa066 2024-11-18T22:45:37,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/3c73b8ab3b4342629d4074f639bfa066, entries=7, sequenceid=220, filesize=12.2 K 2024-11-18T22:45:37,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 9fcd2d84f49ff088c7131b4ceaefae7a in 58ms, sequenceid=220, compaction requested=false 2024-11-18T22:45:37,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:37,537 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:37,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-18T22:45:37,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/dabfde11763645cc8d0dba51434e5c73 is 1080, key is row0166/info:/1731969937478/Put/seqid=0 2024-11-18T22:45:37,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741866_1042 (size=26550) 2024-11-18T22:45:37,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741866_1042 (size=26550) 2024-11-18T22:45:37,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-18T22:45:37,957 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/dabfde11763645cc8d0dba51434e5c73
2024-11-18T22:45:37,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/dabfde11763645cc8d0dba51434e5c73 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/dabfde11763645cc8d0dba51434e5c73
2024-11-18T22:45:37,968 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/dabfde11763645cc8d0dba51434e5c73, entries=20, sequenceid=243, filesize=25.9 K
2024-11-18T22:45:37,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=7.36 KB/7532 for 9fcd2d84f49ff088c7131b4ceaefae7a in 435ms, sequenceid=243, compaction requested=true
2024-11-18T22:45:37,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a:
2024-11-18T22:45:37,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9fcd2d84f49ff088c7131b4ceaefae7a:info, priority=-2147483648, current under compaction store size is 1
2024-11-18T22:45:37,973 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-18T22:45:37,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-18T22:45:37,974 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 149438 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-18T22:45:37,974 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1541): 9fcd2d84f49ff088c7131b4ceaefae7a/info is initiating minor compaction (all files)
2024-11-18T22:45:37,974 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9fcd2d84f49ff088c7131b4ceaefae7a/info in TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.
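The flush entries above all report through the same fixed message ("Finished flush of dataSize ~…/…, heapSize ~…/…, … in …ms, sequenceid=…, compaction requested=…"), which makes them easy to mine when summarizing a long run. Below is a minimal Java sketch of such a line parser; the class name and regular expression are assumptions inferred from the lines in this log, not anything shipped with HBase.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative parser for the "Finished flush ..." lines seen above.
// The regex is an assumption derived from this log's wording, not an official format.
public class FlushLineSketch {
    private static final Pattern FLUSH = Pattern.compile(
        "Finished flush of dataSize ~(\\S+ KB)/(\\d+), heapSize ~(\\S+ KB)/(\\d+), "
      + "currentSize=(\\S+ KB)/(\\d+) for (\\w+) in (\\d+)ms, sequenceid=(\\d+), "
      + "compaction requested=(true|false)");

    public static void main(String[] args) {
        // Sample line copied from the log entries above.
        String line = "2024-11-18T22:45:37,972 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): "
            + "Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, "
            + "currentSize=7.36 KB/7532 for 9fcd2d84f49ff088c7131b4ceaefae7a in 435ms, "
            + "sequenceid=243, compaction requested=true";
        Matcher m = FLUSH.matcher(line);
        if (m.find()) {
            System.out.println("region=" + m.group(7) + " dataBytes=" + m.group(2)
                + " durationMs=" + m.group(8) + " seqId=" + m.group(9)
                + " compactionRequested=" + m.group(10));
        }
    }
}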
2024-11-18T22:45:37,974 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/dc41a7ca404c4342b25a5df247ab9c90, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/3c73b8ab3b4342629d4074f639bfa066, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/dabfde11763645cc8d0dba51434e5c73] into tmpdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp, totalSize=145.9 K 2024-11-18T22:45:37,975 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting dc41a7ca404c4342b25a5df247ab9c90, keycount=97, bloomtype=ROW, size=107.8 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1731969919101 2024-11-18T22:45:37,975 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3c73b8ab3b4342629d4074f639bfa066, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1731969935458 2024-11-18T22:45:37,976 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting dabfde11763645cc8d0dba51434e5c73, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1731969937478 2024-11-18T22:45:38,000 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9fcd2d84f49ff088c7131b4ceaefae7a#info#compaction#83 average throughput is 42.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:45:38,000 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/4a3404ad03e543dd961fb03d37ff2eb0 is 1080, key is row0062/info:/1731969919101/Put/seqid=0 2024-11-18T22:45:38,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741867_1043 (size=139785) 2024-11-18T22:45:38,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741867_1043 (size=139785) 2024-11-18T22:45:38,014 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/4a3404ad03e543dd961fb03d37ff2eb0 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/4a3404ad03e543dd961fb03d37ff2eb0 2024-11-18T22:45:38,020 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9fcd2d84f49ff088c7131b4ceaefae7a/info of 9fcd2d84f49ff088c7131b4ceaefae7a into 4a3404ad03e543dd961fb03d37ff2eb0(size=136.5 K), total size for store is 136.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:45:38,021 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:38,021 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a., storeName=9fcd2d84f49ff088c7131b4ceaefae7a/info, priority=13, startTime=1731969937973; duration=0sec 2024-11-18T22:45:38,021 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:38,021 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9fcd2d84f49ff088c7131b4ceaefae7a:info 2024-11-18T22:45:38,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:38,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:38,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:39,083 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:39,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-18T22:45:39,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a
2024-11-18T22:45:39,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB
2024-11-18T22:45:39,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/819e931a94a64a47ab18b7505e82835d is 1080, key is row0186/info:/1731969937539/Put/seqid=0
2024-11-18T22:45:39,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741868_1044 (size=13594)
2024-11-18T22:45:39,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741868_1044 (size=13594)
2024-11-18T22:45:39,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/819e931a94a64a47ab18b7505e82835d
2024-11-18T22:45:39,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/819e931a94a64a47ab18b7505e82835d as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/819e931a94a64a47ab18b7505e82835d
2024-11-18T22:45:39,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/819e931a94a64a47ab18b7505e82835d, entries=8, sequenceid=255, filesize=13.3 K
2024-11-18T22:45:39,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=11.56 KB/11836 for 9fcd2d84f49ff088c7131b4ceaefae7a in 31ms, sequenceid=255, compaction requested=false
2024-11-18T22:45:39,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a:
2024-11-18T22:45:39,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a
2024-11-18T22:45:39,592 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB
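The recurring Close-WAL-Writer-0 WARN traces in this section come from lease recovery being retried on old WAL files after the test's DFSClient has already been shut down, which is why every probe ends in "java.io.IOException: Filesystem closed". The two HDFS client calls named in those traces, recoverLease and isFileClosed, are public DistributedFileSystem methods; the following is a standalone, hedged sketch of a recover-then-poll loop built on them (the namenode URI, WAL path, and retry bound are placeholders, not values from this test, and this is not the RecoverLeaseFSUtils code itself).

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Recover the lease on a WAL-like file and poll until HDFS reports it closed.
// Uses only public DistributedFileSystem methods (recoverLease / isFileClosed).
public class LeaseRecoverySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder namenode URI and file path; substitute real values.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
        if (!(fs instanceof DistributedFileSystem)) {
            throw new IllegalStateException("lease recovery only applies to HDFS");
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        Path wal = new Path("/hbase/WALs/example-server/example.wal");

        boolean recovered = dfs.recoverLease(wal);      // asks the NameNode to start recovery
        for (int i = 0; !recovered && i < 10; i++) {    // arbitrary bound; HBase retries for longer
            Thread.sleep(1000L);
            // If the filesystem instance has been closed elsewhere, this is the probe that
            // throws "java.io.IOException: Filesystem closed", as in the traces above.
            recovered = dfs.isFileClosed(wal);
        }
        System.out.println("lease recovered=" + recovered);
    }
}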
2024-11-18T22:45:39,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/64a73ca591174f3aad3ff3357913594d is 1080, key is row0194/info:/1731969939561/Put/seqid=0 2024-11-18T22:45:39,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741869_1045 (size=17918) 2024-11-18T22:45:39,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/64a73ca591174f3aad3ff3357913594d 2024-11-18T22:45:39,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741869_1045 (size=17918) 2024-11-18T22:45:39,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/64a73ca591174f3aad3ff3357913594d as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/64a73ca591174f3aad3ff3357913594d 2024-11-18T22:45:39,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/64a73ca591174f3aad3ff3357913594d, entries=12, sequenceid=270, filesize=17.5 K 2024-11-18T22:45:39,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 9fcd2d84f49ff088c7131b4ceaefae7a in 31ms, sequenceid=270, compaction requested=true 2024-11-18T22:45:39,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:39,623 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:45:39,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9fcd2d84f49ff088c7131b4ceaefae7a:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:45:39,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:39,625 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 171297 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:45:39,625 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1541): 9fcd2d84f49ff088c7131b4ceaefae7a/info is initiating minor compaction (all files) 2024-11-18T22:45:39,625 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 
9fcd2d84f49ff088c7131b4ceaefae7a/info in TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 2024-11-18T22:45:39,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:39,625 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/4a3404ad03e543dd961fb03d37ff2eb0, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/819e931a94a64a47ab18b7505e82835d, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/64a73ca591174f3aad3ff3357913594d] into tmpdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp, totalSize=167.3 K 2024-11-18T22:45:39,625 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T22:45:39,625 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4a3404ad03e543dd961fb03d37ff2eb0, keycount=124, bloomtype=ROW, size=136.5 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1731969919101 2024-11-18T22:45:39,626 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 819e931a94a64a47ab18b7505e82835d, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1731969937539 2024-11-18T22:45:39,626 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 64a73ca591174f3aad3ff3357913594d, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1731969939561 2024-11-18T22:45:39,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e207d81fdb3846ac96f0f6f38eb2d4fb is 1080, key is row0206/info:/1731969939593/Put/seqid=0 2024-11-18T22:45:39,645 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9fcd2d84f49ff088c7131b4ceaefae7a#info#compaction#87 average throughput is 36.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:45:39,645 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/70a623fb8622435687d0647be4eefbce is 1080, key is row0062/info:/1731969919101/Put/seqid=0 2024-11-18T22:45:39,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741870_1046 (size=17918) 2024-11-18T22:45:39,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741870_1046 (size=17918) 2024-11-18T22:45:39,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e207d81fdb3846ac96f0f6f38eb2d4fb 2024-11-18T22:45:39,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741871_1047 (size=161463) 2024-11-18T22:45:39,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741871_1047 (size=161463) 2024-11-18T22:45:39,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e207d81fdb3846ac96f0f6f38eb2d4fb as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e207d81fdb3846ac96f0f6f38eb2d4fb 2024-11-18T22:45:39,662 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/70a623fb8622435687d0647be4eefbce as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/70a623fb8622435687d0647be4eefbce 2024-11-18T22:45:39,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e207d81fdb3846ac96f0f6f38eb2d4fb, entries=12, sequenceid=285, filesize=17.5 K 2024-11-18T22:45:39,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=7.36 KB/7532 for 9fcd2d84f49ff088c7131b4ceaefae7a in 40ms, sequenceid=285, compaction requested=false 2024-11-18T22:45:39,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:39,673 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9fcd2d84f49ff088c7131b4ceaefae7a/info of 9fcd2d84f49ff088c7131b4ceaefae7a into 
70a623fb8622435687d0647be4eefbce(size=157.7 K), total size for store is 175.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:45:39,673 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:39,673 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a., storeName=9fcd2d84f49ff088c7131b4ceaefae7a/info, priority=13, startTime=1731969939623; duration=0sec 2024-11-18T22:45:39,673 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:39,673 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9fcd2d84f49ff088c7131b4ceaefae7a:info 2024-11-18T22:45:39,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:40,084 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:40,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:40,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:41,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:41,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:41,394 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-18T22:45:41,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:41,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-18T22:45:41,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/c2cb42d17fed4e1392d29c8ca649a7b8 is 1080, key is row0218/info:/1731969939626/Put/seqid=0 2024-11-18T22:45:41,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741872_1048 (size=13602) 2024-11-18T22:45:41,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741872_1048 (size=13602) 2024-11-18T22:45:41,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/c2cb42d17fed4e1392d29c8ca649a7b8 2024-11-18T22:45:41,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/c2cb42d17fed4e1392d29c8ca649a7b8 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/c2cb42d17fed4e1392d29c8ca649a7b8 2024-11-18T22:45:41,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/c2cb42d17fed4e1392d29c8ca649a7b8, entries=8, 
sequenceid=297, filesize=13.3 K 2024-11-18T22:45:41,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for 9fcd2d84f49ff088c7131b4ceaefae7a in 23ms, sequenceid=297, compaction requested=true 2024-11-18T22:45:41,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:41,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9fcd2d84f49ff088c7131b4ceaefae7a:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:45:41,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:41,671 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-18T22:45:41,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:41,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-18T22:45:41,672 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 192983 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-18T22:45:41,672 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1541): 9fcd2d84f49ff088c7131b4ceaefae7a/info is initiating minor compaction (all files) 2024-11-18T22:45:41,672 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9fcd2d84f49ff088c7131b4ceaefae7a/info in TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 
2024-11-18T22:45:41,672 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/70a623fb8622435687d0647be4eefbce, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e207d81fdb3846ac96f0f6f38eb2d4fb, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/c2cb42d17fed4e1392d29c8ca649a7b8] into tmpdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp, totalSize=188.5 K 2024-11-18T22:45:41,672 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 70a623fb8622435687d0647be4eefbce, keycount=144, bloomtype=ROW, size=157.7 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1731969919101 2024-11-18T22:45:41,673 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting e207d81fdb3846ac96f0f6f38eb2d4fb, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1731969939593 2024-11-18T22:45:41,674 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting c2cb42d17fed4e1392d29c8ca649a7b8, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1731969939626 2024-11-18T22:45:41,675 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e4f144c820cd4acca017ea704a356604 is 1080, key is row0226/info:/1731969941648/Put/seqid=0 2024-11-18T22:45:41,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741873_1049 (size=16839) 2024-11-18T22:45:41,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741873_1049 (size=16839) 2024-11-18T22:45:41,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e4f144c820cd4acca017ea704a356604 2024-11-18T22:45:41,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e4f144c820cd4acca017ea704a356604 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e4f144c820cd4acca017ea704a356604 2024-11-18T22:45:41,688 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9fcd2d84f49ff088c7131b4ceaefae7a#info#compaction#90 average throughput is 42.07 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:45:41,688 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/745d5f705b92420288955bf746eeb601 is 1080, key is row0062/info:/1731969919101/Put/seqid=0 2024-11-18T22:45:41,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e4f144c820cd4acca017ea704a356604, entries=11, sequenceid=311, filesize=16.4 K 2024-11-18T22:45:41,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=6.30 KB/6456 for 9fcd2d84f49ff088c7131b4ceaefae7a in 25ms, sequenceid=311, compaction requested=false 2024-11-18T22:45:41,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:41,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:41,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-18T22:45:41,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/875f7dc412d847d1ab7a7870dba10f63 is 1080, key is row0237/info:/1731969941672/Put/seqid=0 2024-11-18T22:45:41,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741874_1050 (size=183149) 2024-11-18T22:45:41,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741875_1051 (size=12523) 2024-11-18T22:45:41,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741875_1051 (size=12523) 2024-11-18T22:45:41,713 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/875f7dc412d847d1ab7a7870dba10f63 2024-11-18T22:45:41,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741874_1050 (size=183149) 2024-11-18T22:45:41,720 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/745d5f705b92420288955bf746eeb601 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/745d5f705b92420288955bf746eeb601 
2024-11-18T22:45:41,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/875f7dc412d847d1ab7a7870dba10f63 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/875f7dc412d847d1ab7a7870dba10f63 2024-11-18T22:45:41,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/875f7dc412d847d1ab7a7870dba10f63, entries=7, sequenceid=321, filesize=12.2 K 2024-11-18T22:45:41,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 9fcd2d84f49ff088c7131b4ceaefae7a in 29ms, sequenceid=321, compaction requested=false 2024-11-18T22:45:41,727 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9fcd2d84f49ff088c7131b4ceaefae7a/info of 9fcd2d84f49ff088c7131b4ceaefae7a into 745d5f705b92420288955bf746eeb601(size=178.9 K), total size for store is 207.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:45:41,727 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:41,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:41,727 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a., storeName=9fcd2d84f49ff088c7131b4ceaefae7a/info, priority=13, startTime=1731969941670; duration=0sec 2024-11-18T22:45:41,727 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:41,727 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9fcd2d84f49ff088c7131b4ceaefae7a:info 2024-11-18T22:45:41,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33617 {}] regionserver.HRegion(8855): Flush requested on 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:41,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-18T22:45:41,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e536f5566d6d4f1996949e2ca1ce61e4 is 1080, key is row0244/info:/1731969941699/Put/seqid=0 2024-11-18T22:45:41,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741876_1052 (size=17918) 2024-11-18T22:45:41,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43435 is added to blk_1073741876_1052 (size=17918) 2024-11-18T22:45:41,737 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e536f5566d6d4f1996949e2ca1ce61e4 2024-11-18T22:45:41,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/e536f5566d6d4f1996949e2ca1ce61e4 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e536f5566d6d4f1996949e2ca1ce61e4 2024-11-18T22:45:41,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e536f5566d6d4f1996949e2ca1ce61e4, entries=12, sequenceid=337, filesize=17.5 K 2024-11-18T22:45:41,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=1.05 KB/1076 for 9fcd2d84f49ff088c7131b4ceaefae7a in 20ms, sequenceid=337, compaction requested=true 2024-11-18T22:45:41,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:41,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9fcd2d84f49ff088c7131b4ceaefae7a:info, priority=-2147483648, current under compaction store size is 1 2024-11-18T22:45:41,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:41,748 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-18T22:45:41,750 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 230429 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-18T22:45:41,750 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1541): 9fcd2d84f49ff088c7131b4ceaefae7a/info is initiating minor compaction (all files) 2024-11-18T22:45:41,750 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9fcd2d84f49ff088c7131b4ceaefae7a/info in TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 
2024-11-18T22:45:41,750 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/745d5f705b92420288955bf746eeb601, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e4f144c820cd4acca017ea704a356604, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/875f7dc412d847d1ab7a7870dba10f63, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e536f5566d6d4f1996949e2ca1ce61e4] into tmpdir=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp, totalSize=225.0 K 2024-11-18T22:45:41,750 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 745d5f705b92420288955bf746eeb601, keycount=164, bloomtype=ROW, size=178.9 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1731969919101 2024-11-18T22:45:41,751 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting e4f144c820cd4acca017ea704a356604, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1731969941648 2024-11-18T22:45:41,751 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting 875f7dc412d847d1ab7a7870dba10f63, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1731969941672 2024-11-18T22:45:41,751 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] compactions.Compactor(225): Compacting e536f5566d6d4f1996949e2ca1ce61e4, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=337, earliestPutTs=1731969941699 2024-11-18T22:45:41,765 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9fcd2d84f49ff088c7131b4ceaefae7a#info#compaction#93 average throughput is 49.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-18T22:45:41,766 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/b011b631f53c426b9072986196a4e4d7 is 1080, key is row0062/info:/1731969919101/Put/seqid=0 2024-11-18T22:45:41,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741877_1053 (size=215732) 2024-11-18T22:45:41,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741877_1053 (size=215732) 2024-11-18T22:45:41,774 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/b011b631f53c426b9072986196a4e4d7 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/b011b631f53c426b9072986196a4e4d7 2024-11-18T22:45:41,780 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 9fcd2d84f49ff088c7131b4ceaefae7a/info of 9fcd2d84f49ff088c7131b4ceaefae7a into b011b631f53c426b9072986196a4e4d7(size=210.7 K), total size for store is 210.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-18T22:45:41,780 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:41,780 INFO [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a., storeName=9fcd2d84f49ff088c7131b4ceaefae7a/info, priority=12, startTime=1731969941748; duration=0sec 2024-11-18T22:45:41,780 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-18T22:45:41,780 DEBUG [RS:0;3b0ca91ed64a:33617-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9fcd2d84f49ff088c7131b4ceaefae7a:info 2024-11-18T22:45:41,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:42,085 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:42,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:42,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:43,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:43,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:43,731 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-18T22:45:43,731 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C33617%2C1731969895393.1731969943731 2024-11-18T22:45:43,737 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:43,737 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:43,737 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:43,737 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:43,737 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:43,737 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393/3b0ca91ed64a%2C33617%2C1731969895393.1731969896008 with entries=317, filesize=310.21 KB; new WAL /user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393/3b0ca91ed64a%2C33617%2C1731969895393.1731969943731 2024-11-18T22:45:43,738 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43885:43885),(127.0.0.1/127.0.0.1:40677:40677)] 2024-11-18T22:45:43,738 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393/3b0ca91ed64a%2C33617%2C1731969895393.1731969896008 is not closed yet, will try archiving it next time 2024-11-18T22:45:43,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741833_1009 (size=317667) 2024-11-18T22:45:43,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741833_1009 (size=317667) 2024-11-18T22:45:43,743 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for a1f3f5bbc20befd26afd04901ad35af5: 2024-11-18T22:45:43,743 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-18T22:45:43,747 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/info/4617ec5c71f24d20b5fcabd591121f28 is 193, key is TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a./info:regioninfo/1731969922264/Put/seqid=0 2024-11-18T22:45:43,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741879_1055 (size=6223) 2024-11-18T22:45:43,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741879_1055 (size=6223) 2024-11-18T22:45:43,758 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/info/4617ec5c71f24d20b5fcabd591121f28 2024-11-18T22:45:43,766 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/.tmp/info/4617ec5c71f24d20b5fcabd591121f28 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/info/4617ec5c71f24d20b5fcabd591121f28 2024-11-18T22:45:43,777 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/info/4617ec5c71f24d20b5fcabd591121f28, entries=5, sequenceid=21, filesize=6.1 K 2024-11-18T22:45:43,778 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 35ms, sequenceid=21, compaction requested=false 2024-11-18T22:45:43,778 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-18T22:45:43,778 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 9fcd2d84f49ff088c7131b4ceaefae7a 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-18T22:45:43,783 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/ebdf24c7b82a40bc900b91d8e9ad5649 is 1080, key is row0256/info:/1731969941729/Put/seqid=0 2024-11-18T22:45:43,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741880_1056 (size=6035) 2024-11-18T22:45:43,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741880_1056 (size=6035) 2024-11-18T22:45:43,788 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/ebdf24c7b82a40bc900b91d8e9ad5649 2024-11-18T22:45:43,801 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/.tmp/info/ebdf24c7b82a40bc900b91d8e9ad5649 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/ebdf24c7b82a40bc900b91d8e9ad5649 2024-11-18T22:45:43,806 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/ebdf24c7b82a40bc900b91d8e9ad5649, entries=1, sequenceid=342, filesize=5.9 K 2024-11-18T22:45:43,807 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 9fcd2d84f49ff088c7131b4ceaefae7a in 29ms, sequenceid=342, compaction requested=false 2024-11-18T22:45:43,807 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 9fcd2d84f49ff088c7131b4ceaefae7a: 2024-11-18T22:45:43,808 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
3b0ca91ed64a%2C33617%2C1731969895393.1731969943807 2024-11-18T22:45:43,813 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:43,813 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:43,813 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:43,813 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:43,813 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:43,813 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393/3b0ca91ed64a%2C33617%2C1731969895393.1731969943731 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393/3b0ca91ed64a%2C33617%2C1731969895393.1731969943807 2024-11-18T22:45:43,814 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43885:43885),(127.0.0.1/127.0.0.1:40677:40677)] 2024-11-18T22:45:43,814 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393/3b0ca91ed64a%2C33617%2C1731969895393.1731969943731 is not closed yet, will try archiving it next time 2024-11-18T22:45:43,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741878_1054 (size=731) 2024-11-18T22:45:43,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741878_1054 (size=731) 2024-11-18T22:45:43,820 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393/3b0ca91ed64a%2C33617%2C1731969895393.1731969896008 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/oldWALs/3b0ca91ed64a%2C33617%2C1731969895393.1731969896008 2024-11-18T22:45:43,821 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-18T22:45:43,822 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/WALs/3b0ca91ed64a,33617,1731969895393/3b0ca91ed64a%2C33617%2C1731969895393.1731969943731 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/oldWALs/3b0ca91ed64a%2C33617%2C1731969895393.1731969943731 2024-11-18T22:45:43,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:43,921 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T22:45:43,922 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
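The WARN above is WAL lease recovery racing with test shutdown: after asking the NameNode to recover the lease on the old WAL named in the warning, RecoverLeaseFSUtils polls DistributedFileSystem.isFileClosed(), and here the DFSClient behind that FileSystem had already been closed, hence the wrapped "Filesystem closed" IOException. As a rough illustration of that poll-until-closed pattern (a minimal sketch against the public HDFS client API, not the HBase implementation; the class name, the 60-second budget and the 1-second poll interval are invented, while the hdfs://localhost:42521 endpoint comes from the log):

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {

      // Poll-based lease recovery, roughly the shape of recoverDFSFileLease: ask the
      // NameNode to recover the lease, then poll isFileClosed() until it reports true.
      static boolean recoverLease(DistributedFileSystem dfs, Path wal)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + 60_000L;
        boolean recovered = dfs.recoverLease(wal); // true if the file is already closed
        while (!recovered && System.currentTimeMillis() < deadline) {
          Thread.sleep(1_000L);
          // This is the call that failed above: the DFSClient backing the FileSystem
          // had already been shut down, so it threw "Filesystem closed" instead of answering.
          recovered = dfs.isFileClosed(wal);
        }
        return recovered;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:42521"), conf);
        System.out.println("recovered: " + recoverLease(dfs, new Path(args[0])));
      }
    }

In this run the warning surfaces during minicluster tear-down, so the recovery attempt is simply abandoned along with the rest of the shutdown.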
2024-11-18T22:45:43,922 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:45:43,922 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:45:43,922 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:45:43,922 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
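The call stack above is the ordinary JUnit tear-down path for these tests: an @After method in AbstractTestLogRolling calls HBaseTestingUtil.shutdownMiniCluster(), which first closes the shared async connection (the "Connection has been closed" message) and then stops the cluster. A minimal harness sketch of that shape; the class and test names are invented, and startMiniCluster() is assumed as the usual counterpart to the shutdownMiniCluster() call visible in the trace:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class LogRollingHarnessSketch {

      // One utility instance drives the in-process ZooKeeper, HDFS and HBase cluster.
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Starts the single-process cluster the log calls a minicluster; the
        // "Time-limited test" thread name comes from JUnit's timeout wrapper.
        testUtil.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // The call at the top of the stack above: closes the shared async connection,
        // asks the master to shut the cluster down, then stops the mini DFS.
        testUtil.shutdownMiniCluster();
      }

      @Test
      public void testWritesSurviveLogRolling() throws Exception {
        // test body omitted; the real tests write rows and roll the WAL in between
      }
    }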
2024-11-18T22:45:43,922 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T22:45:43,922 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1469524132, stopped=false 2024-11-18T22:45:43,922 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3b0ca91ed64a,41377,1731969895226 2024-11-18T22:45:43,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:45:43,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:45:43,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:43,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:43,932 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:45:43,933 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T22:45:43,933 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:45:43,933 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:45:43,933 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3b0ca91ed64a,33617,1731969895393' ***** 2024-11-18T22:45:43,933 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T22:45:43,933 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:45:43,933 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T22:45:43,933 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T22:45:43,934 INFO [RS:0;3b0ca91ed64a:33617 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T22:45:43,934 INFO [RS:0;3b0ca91ed64a:33617 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-18T22:45:43,934 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(3091): Received CLOSE for a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:43,934 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(3091): Received CLOSE for 9fcd2d84f49ff088c7131b4ceaefae7a 2024-11-18T22:45:43,934 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(959): stopping server 3b0ca91ed64a,33617,1731969895393 2024-11-18T22:45:43,934 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:45:43,934 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a1f3f5bbc20befd26afd04901ad35af5, disabling compactions & flushes 2024-11-18T22:45:43,934 INFO [RS:0;3b0ca91ed64a:33617 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3b0ca91ed64a:33617. 2024-11-18T22:45:43,934 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5. 2024-11-18T22:45:43,934 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5. 2024-11-18T22:45:43,934 DEBUG [RS:0;3b0ca91ed64a:33617 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:45:43,934 DEBUG [RS:0;3b0ca91ed64a:33617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:45:43,934 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5. after waiting 0 ms 2024-11-18T22:45:43,934 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5. 2024-11-18T22:45:43,934 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
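The "Time limited wait for close lock" / "Acquired close lock ... after waiting 0 ms" pair records the region taking an exclusive close lock with a bounded wait before it disables writes, so shutdown cannot hang forever behind an in-flight operation. Purely as a generic illustration of that timed-exclusive-lock pattern (not HBase's HRegion code; the class name and 60-second timeout are invented):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class TimedCloseLockSketch {

      private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();

      // Take the write (exclusive) side of the lock with a bounded wait so a stuck
      // reader cannot block close indefinitely; report how long the wait took.
      public boolean closeWithTimeout() throws InterruptedException {
        long start = System.currentTimeMillis();
        if (!closeLock.writeLock().tryLock(60, TimeUnit.SECONDS)) {
          return false; // give up; the caller decides whether to abort or retry
        }
        try {
          System.out.println("Acquired close lock after waiting "
              + (System.currentTimeMillis() - start) + " ms");
          // ... disable writes, flush or discard in-memory state, write the close marker ...
          return true;
        } finally {
          closeLock.writeLock().unlock();
        }
      }
    }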
2024-11-18T22:45:43,934 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T22:45:43,934 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-18T22:45:43,934 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T22:45:43,935 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-18T22:45:43,935 DEBUG [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(1325): Online Regions={a1f3f5bbc20befd26afd04901ad35af5=TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5., 1588230740=hbase:meta,,1.1588230740, 9fcd2d84f49ff088c7131b4ceaefae7a=TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.} 2024-11-18T22:45:43,935 DEBUG [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9fcd2d84f49ff088c7131b4ceaefae7a, a1f3f5bbc20befd26afd04901ad35af5 2024-11-18T22:45:43,935 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9->hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/53b62ab98eed43c68127a2806d421ce6-bottom] to archive 2024-11-18T22:45:43,936 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-18T22:45:43,936 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:45:43,936 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:45:43,936 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:45:43,936 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:45:43,936 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:45:43,936 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:45:43,938 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:43,938 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=3b0ca91ed64a:41377 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-18T22:45:43,939 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-18T22:45:43,941 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-18T22:45:43,942 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:45:43,942 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:45:43,942 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969943936Running coprocessor pre-close hooks at 1731969943936Disabling compacts and flushes for region at 1731969943936Disabling writes for close at 1731969943936Writing region close event to WAL at 1731969943937 (+1 ms)Running coprocessor post-close hooks at 1731969943942 (+5 ms)Closed at 1731969943942 2024-11-18T22:45:43,942 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-18T22:45:43,943 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/a1f3f5bbc20befd26afd04901ad35af5/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-11-18T22:45:43,943 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5. 2024-11-18T22:45:43,943 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a1f3f5bbc20befd26afd04901ad35af5: Waiting for close lock at 1731969943934Running coprocessor pre-close hooks at 1731969943934Disabling compacts and flushes for region at 1731969943934Disabling writes for close at 1731969943934Writing region close event to WAL at 1731969943939 (+5 ms)Running coprocessor post-close hooks at 1731969943943 (+4 ms)Closed at 1731969943943 2024-11-18T22:45:43,943 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731969921216.a1f3f5bbc20befd26afd04901ad35af5. 2024-11-18T22:45:43,944 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9fcd2d84f49ff088c7131b4ceaefae7a, disabling compactions & flushes 2024-11-18T22:45:43,944 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 
2024-11-18T22:45:43,944 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 2024-11-18T22:45:43,944 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. after waiting 0 ms 2024-11-18T22:45:43,944 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 2024-11-18T22:45:43,944 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9->hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/0cda5b537c5711031301b341cec171a9/info/53b62ab98eed43c68127a2806d421ce6-top, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-79094f4de79948b4a524c5894275f97d, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-fd9322b8b1ad47059e8c03a0b5f36e5f, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/b19a2131fd1849399033e76083bac5d0, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-56878517aadb4cb98cbf8e812279da11, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e6a7c8aacfc0491882710f9e553c570d, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/def92ea13cd6403097bc9090be8b7619, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/f90da1a69e2e494c95e2662b37c58fbc, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/bf4573cec3434ee0bfcef941e92738f1, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/1a39de087a3e4676ba2898f0864fe503, 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/7cfa9a7574a741969b2edd285a18d5a5, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/9b37d828fa904e57a1d6e7f96b70e066, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/dc41a7ca404c4342b25a5df247ab9c90, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/d11dd6605b0d48e3a13025ea1379b32a, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/3c73b8ab3b4342629d4074f639bfa066, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/4a3404ad03e543dd961fb03d37ff2eb0, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/dabfde11763645cc8d0dba51434e5c73, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/819e931a94a64a47ab18b7505e82835d, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/70a623fb8622435687d0647be4eefbce, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/64a73ca591174f3aad3ff3357913594d, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e207d81fdb3846ac96f0f6f38eb2d4fb, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/745d5f705b92420288955bf746eeb601, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/c2cb42d17fed4e1392d29c8ca649a7b8, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e4f144c820cd4acca017ea704a356604, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/875f7dc412d847d1ab7a7870dba10f63, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e536f5566d6d4f1996949e2ca1ce61e4] to archive 2024-11-18T22:45:43,946 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
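Every "Archived from FileableStoreFile" line that follows performs the same rewrite: a compacted store file under <rootdir>/data/<namespace>/<table>/<region>/<family>/ is moved to the mirrored location under <rootdir>/archive/data/.... A small sketch of that mapping as plain path manipulation (an illustrative helper, not HBase's HFileArchiver API); the root directory, region and file name in main() are taken from the log:

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {

      // Mirrors a store file under <rootDir>/data/... to <rootDir>/archive/data/...
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String dataPrefix = rootDir.toString() + "/data/";
        String file = storeFile.toString();
        if (!file.startsWith(dataPrefix)) {
          throw new IllegalArgumentException("not under " + dataPrefix + ": " + file);
        }
        return new Path(rootDir.toString() + "/archive/data/" + file.substring(dataPrefix.length()));
      }

      public static void main(String[] args) {
        Path root = new Path(
            "hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37");
        Path storeFile = new Path(root,
            "data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/"
                + "b19a2131fd1849399033e76083bac5d0");
        // Prints the same archive location that appears in the archiver lines below.
        System.out.println(toArchivePath(root, storeFile));
      }
    }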
2024-11-18T22:45:43,947 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/53b62ab98eed43c68127a2806d421ce6.0cda5b537c5711031301b341cec171a9 2024-11-18T22:45:43,948 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-79094f4de79948b4a524c5894275f97d to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-79094f4de79948b4a524c5894275f97d 2024-11-18T22:45:43,950 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-fd9322b8b1ad47059e8c03a0b5f36e5f to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-fd9322b8b1ad47059e8c03a0b5f36e5f 2024-11-18T22:45:43,951 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/b19a2131fd1849399033e76083bac5d0 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/b19a2131fd1849399033e76083bac5d0 2024-11-18T22:45:43,952 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-56878517aadb4cb98cbf8e812279da11 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/TestLogRolling-testLogRolling=0cda5b537c5711031301b341cec171a9-56878517aadb4cb98cbf8e812279da11 2024-11-18T22:45:43,953 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e6a7c8aacfc0491882710f9e553c570d to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e6a7c8aacfc0491882710f9e553c570d 2024-11-18T22:45:43,954 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/def92ea13cd6403097bc9090be8b7619 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/def92ea13cd6403097bc9090be8b7619 2024-11-18T22:45:43,956 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/f90da1a69e2e494c95e2662b37c58fbc to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/f90da1a69e2e494c95e2662b37c58fbc 2024-11-18T22:45:43,957 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/bf4573cec3434ee0bfcef941e92738f1 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/bf4573cec3434ee0bfcef941e92738f1 2024-11-18T22:45:43,958 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/1a39de087a3e4676ba2898f0864fe503 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/1a39de087a3e4676ba2898f0864fe503 2024-11-18T22:45:43,960 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/7cfa9a7574a741969b2edd285a18d5a5 to 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/7cfa9a7574a741969b2edd285a18d5a5 2024-11-18T22:45:43,961 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/9b37d828fa904e57a1d6e7f96b70e066 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/9b37d828fa904e57a1d6e7f96b70e066 2024-11-18T22:45:43,962 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/dc41a7ca404c4342b25a5df247ab9c90 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/dc41a7ca404c4342b25a5df247ab9c90 2024-11-18T22:45:43,963 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/d11dd6605b0d48e3a13025ea1379b32a to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/d11dd6605b0d48e3a13025ea1379b32a 2024-11-18T22:45:43,964 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/3c73b8ab3b4342629d4074f639bfa066 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/3c73b8ab3b4342629d4074f639bfa066 2024-11-18T22:45:43,965 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/4a3404ad03e543dd961fb03d37ff2eb0 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/4a3404ad03e543dd961fb03d37ff2eb0 2024-11-18T22:45:43,966 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/dabfde11763645cc8d0dba51434e5c73 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/dabfde11763645cc8d0dba51434e5c73 2024-11-18T22:45:43,967 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/819e931a94a64a47ab18b7505e82835d to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/819e931a94a64a47ab18b7505e82835d 2024-11-18T22:45:43,969 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/70a623fb8622435687d0647be4eefbce to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/70a623fb8622435687d0647be4eefbce 2024-11-18T22:45:43,970 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/64a73ca591174f3aad3ff3357913594d to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/64a73ca591174f3aad3ff3357913594d 2024-11-18T22:45:43,971 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e207d81fdb3846ac96f0f6f38eb2d4fb to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e207d81fdb3846ac96f0f6f38eb2d4fb 2024-11-18T22:45:43,972 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/745d5f705b92420288955bf746eeb601 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/745d5f705b92420288955bf746eeb601 2024-11-18T22:45:43,973 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/c2cb42d17fed4e1392d29c8ca649a7b8 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/c2cb42d17fed4e1392d29c8ca649a7b8 2024-11-18T22:45:43,974 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e4f144c820cd4acca017ea704a356604 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e4f144c820cd4acca017ea704a356604 2024-11-18T22:45:43,976 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/875f7dc412d847d1ab7a7870dba10f63 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/875f7dc412d847d1ab7a7870dba10f63 2024-11-18T22:45:43,977 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e536f5566d6d4f1996949e2ca1ce61e4 to hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/archive/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/info/e536f5566d6d4f1996949e2ca1ce61e4 2024-11-18T22:45:43,977 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b19a2131fd1849399033e76083bac5d0=43081, e6a7c8aacfc0491882710f9e553c570d=12516, def92ea13cd6403097bc9090be8b7619=64811, f90da1a69e2e494c95e2662b37c58fbc=19000, bf4573cec3434ee0bfcef941e92738f1=13594, 1a39de087a3e4676ba2898f0864fe503=81156, 7cfa9a7574a741969b2edd285a18d5a5=12516, 9b37d828fa904e57a1d6e7f96b70e066=16828, dc41a7ca404c4342b25a5df247ab9c90=110372, d11dd6605b0d48e3a13025ea1379b32a=22238, 3c73b8ab3b4342629d4074f639bfa066=12516, 4a3404ad03e543dd961fb03d37ff2eb0=139785, dabfde11763645cc8d0dba51434e5c73=26550, 819e931a94a64a47ab18b7505e82835d=13594, 70a623fb8622435687d0647be4eefbce=161463, 64a73ca591174f3aad3ff3357913594d=17918, e207d81fdb3846ac96f0f6f38eb2d4fb=17918, 745d5f705b92420288955bf746eeb601=183149, c2cb42d17fed4e1392d29c8ca649a7b8=13602, e4f144c820cd4acca017ea704a356604=16839, 875f7dc412d847d1ab7a7870dba10f63=12523, e536f5566d6d4f1996949e2ca1ce61e4=17918] 2024-11-18T22:45:43,981 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/data/default/TestLogRolling-testLogRolling/9fcd2d84f49ff088c7131b4ceaefae7a/recovered.edits/345.seqid, newMaxSeqId=345, maxSeqId=126 2024-11-18T22:45:43,981 INFO [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 2024-11-18T22:45:43,981 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9fcd2d84f49ff088c7131b4ceaefae7a: Waiting for close lock at 1731969943944Running coprocessor pre-close hooks at 1731969943944Disabling compacts and flushes for region at 1731969943944Disabling writes for close at 1731969943944Writing region close event to WAL at 1731969943978 (+34 ms)Running coprocessor post-close hooks at 1731969943981 (+3 ms)Closed at 1731969943981 2024-11-18T22:45:43,981 DEBUG [RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731969921216.9fcd2d84f49ff088c7131b4ceaefae7a. 2024-11-18T22:45:44,086 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:44,135 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(976): stopping server 3b0ca91ed64a,33617,1731969895393; all regions closed. 2024-11-18T22:45:44,136 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,136 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,140 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,143 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,144 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741834_1010 (size=8107) 2024-11-18T22:45:44,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741834_1010 (size=8107) 2024-11-18T22:45:44,163 DEBUG [RS:0;3b0ca91ed64a:33617 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/oldWALs 2024-11-18T22:45:44,163 INFO [RS:0;3b0ca91ed64a:33617 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C33617%2C1731969895393.meta:.meta(num 1731969896387) 2024-11-18T22:45:44,163 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,163 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,164 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,164 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,164 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741881_1057 (size=780) 2024-11-18T22:45:44,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741881_1057 (size=780) 2024-11-18T22:45:44,174 DEBUG [RS:0;3b0ca91ed64a:33617 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/oldWALs 2024-11-18T22:45:44,174 INFO [RS:0;3b0ca91ed64a:33617 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C33617%2C1731969895393:(num 1731969943807) 2024-11-18T22:45:44,174 DEBUG [RS:0;3b0ca91ed64a:33617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:45:44,174 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:45:44,175 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:45:44,175 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.ChoreService(370): Chore service for: regionserver/3b0ca91ed64a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-18T22:45:44,175 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:45:44,175 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
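The roll-and-archive cycle visible above (a new WAL writer is created, the previous file ends up under oldWALs once its edits are no longer needed) is the same operation a client can force through the Admin API, which is essentially what the log-rolling tests exercise. A minimal sketch, assuming the cluster is still reachable with default client configuration; the ServerName string is copied from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // host,port,startcode of the region server, copied from the log above.
        ServerName server = ServerName.valueOf("3b0ca91ed64a,33617,1731969895393");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the region server to close its current WAL and start a new one; once
          // the old file holds no unflushed edits it is moved to oldWALs, as above.
          admin.rollWALWriter(server);
        }
      }
    }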
2024-11-18T22:45:44,175 INFO [RS:0;3b0ca91ed64a:33617 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33617 2024-11-18T22:45:44,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:45:44,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3b0ca91ed64a,33617,1731969895393 2024-11-18T22:45:44,182 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:45:44,192 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3b0ca91ed64a,33617,1731969895393] 2024-11-18T22:45:44,203 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3b0ca91ed64a,33617,1731969895393 already deleted, retry=false 2024-11-18T22:45:44,203 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3b0ca91ed64a,33617,1731969895393 expired; onlineServers=0 2024-11-18T22:45:44,203 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3b0ca91ed64a,41377,1731969895226' ***** 2024-11-18T22:45:44,203 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-18T22:45:44,203 INFO [M:0;3b0ca91ed64a:41377 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:45:44,203 INFO [M:0;3b0ca91ed64a:41377 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-18T22:45:44,203 DEBUG [M:0;3b0ca91ed64a:41377 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-18T22:45:44,203 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-18T22:45:44,203 DEBUG [M:0;3b0ca91ed64a:41377 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-18T22:45:44,203 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969895752 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969895752,5,FailOnTimeoutGroup] 2024-11-18T22:45:44,203 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969895752 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969895752,5,FailOnTimeoutGroup] 2024-11-18T22:45:44,203 INFO [M:0;3b0ca91ed64a:41377 {}] hbase.ChoreService(370): Chore service for: master/3b0ca91ed64a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-18T22:45:44,203 INFO [M:0;3b0ca91ed64a:41377 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-18T22:45:44,203 DEBUG [M:0;3b0ca91ed64a:41377 {}] master.HMaster(1795): Stopping service threads 2024-11-18T22:45:44,203 INFO [M:0;3b0ca91ed64a:41377 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-18T22:45:44,203 INFO [M:0;3b0ca91ed64a:41377 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:45:44,204 INFO [M:0;3b0ca91ed64a:41377 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-18T22:45:44,204 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-18T22:45:44,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T22:45:44,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:44,224 DEBUG [M:0;3b0ca91ed64a:41377 {}] zookeeper.ZKUtil(347): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T22:45:44,224 WARN [M:0;3b0ca91ed64a:41377 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T22:45:44,224 INFO [M:0;3b0ca91ed64a:41377 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/.lastflushedseqids 2024-11-18T22:45:44,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741882_1058 (size=228) 2024-11-18T22:45:44,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741882_1058 (size=228) 2024-11-18T22:45:44,229 INFO [M:0;3b0ca91ed64a:41377 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T22:45:44,230 INFO [M:0;3b0ca91ed64a:41377 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T22:45:44,230 DEBUG [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:45:44,230 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:45:44,230 DEBUG [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:45:44,230 DEBUG [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:45:44,230 DEBUG [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:45:44,230 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-18T22:45:44,249 DEBUG [M:0;3b0ca91ed64a:41377 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e394ccdbfaf34da48d9f16224378a8bf is 82, key is hbase:meta,,1/info:regioninfo/1731969896412/Put/seqid=0 2024-11-18T22:45:44,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741883_1059 (size=5672) 2024-11-18T22:45:44,254 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e394ccdbfaf34da48d9f16224378a8bf 2024-11-18T22:45:44,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741883_1059 (size=5672) 2024-11-18T22:45:44,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:44,280 DEBUG [M:0;3b0ca91ed64a:41377 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/244fc9b429b2425b98d82ca3978bc57a is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731969897044/Put/seqid=0 2024-11-18T22:45:44,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741884_1060 (size=7090) 2024-11-18T22:45:44,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741884_1060 (size=7090) 2024-11-18T22:45:44,288 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/244fc9b429b2425b98d82ca3978bc57a 2024-11-18T22:45:44,293 INFO [RS:0;3b0ca91ed64a:33617 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:45:44,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:45:44,293 INFO [RS:0;3b0ca91ed64a:33617 {}] regionserver.HRegionServer(1031): Exiting; stopping=3b0ca91ed64a,33617,1731969895393; zookeeper connection closed. 
2024-11-18T22:45:44,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33617-0x101501f46cc0001, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:45:44,293 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7e98573b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7e98573b 2024-11-18T22:45:44,293 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T22:45:44,294 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 244fc9b429b2425b98d82ca3978bc57a 2024-11-18T22:45:44,310 DEBUG [M:0;3b0ca91ed64a:41377 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4025811e7cdd4fac9d200de5729e6f24 is 69, key is 3b0ca91ed64a,33617,1731969895393/rs:state/1731969895847/Put/seqid=0 2024-11-18T22:45:44,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741885_1061 (size=5156) 2024-11-18T22:45:44,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741885_1061 (size=5156) 2024-11-18T22:45:44,723 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4025811e7cdd4fac9d200de5729e6f24 2024-11-18T22:45:44,745 DEBUG [M:0;3b0ca91ed64a:41377 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/94e96848e3ee4713a4e2b00071091287 is 52, key is load_balancer_on/state:d/1731969896628/Put/seqid=0 2024-11-18T22:45:44,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741886_1062 (size=5056) 2024-11-18T22:45:44,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741886_1062 (size=5056) 2024-11-18T22:45:44,750 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/94e96848e3ee4713a4e2b00071091287 2024-11-18T22:45:44,754 DEBUG [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e394ccdbfaf34da48d9f16224378a8bf as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e394ccdbfaf34da48d9f16224378a8bf 2024-11-18T22:45:44,758 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e394ccdbfaf34da48d9f16224378a8bf, entries=8, sequenceid=125, filesize=5.5 K 2024-11-18T22:45:44,759 DEBUG [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/244fc9b429b2425b98d82ca3978bc57a as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/244fc9b429b2425b98d82ca3978bc57a 2024-11-18T22:45:44,764 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 244fc9b429b2425b98d82ca3978bc57a 2024-11-18T22:45:44,764 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/244fc9b429b2425b98d82ca3978bc57a, entries=13, sequenceid=125, filesize=6.9 K 2024-11-18T22:45:44,765 DEBUG [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4025811e7cdd4fac9d200de5729e6f24 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4025811e7cdd4fac9d200de5729e6f24 2024-11-18T22:45:44,769 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4025811e7cdd4fac9d200de5729e6f24, entries=1, sequenceid=125, filesize=5.0 K 2024-11-18T22:45:44,770 DEBUG [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/94e96848e3ee4713a4e2b00071091287 as hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/94e96848e3ee4713a4e2b00071091287 2024-11-18T22:45:44,775 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41169/user/jenkins/test-data/d442bbfb-b074-e71c-3c47-a4e8737d4f37/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/94e96848e3ee4713a4e2b00071091287, entries=1, sequenceid=125, filesize=4.9 K 2024-11-18T22:45:44,776 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 545ms, sequenceid=125, compaction requested=false 2024-11-18T22:45:44,779 INFO [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T22:45:44,779 DEBUG [M:0;3b0ca91ed64a:41377 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969944230Disabling compacts and flushes for region at 1731969944230Disabling writes for close at 1731969944230Obtaining lock to block concurrent updates at 1731969944230Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731969944230Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731969944230Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731969944231 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731969944231Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731969944249 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731969944249Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731969944260 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731969944279 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731969944279Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731969944294 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731969944309 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731969944309Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731969944728 (+419 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731969944744 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731969944744Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c145674: reopening flushed file at 1731969944753 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f9ef1aa: reopening flushed file at 1731969944758 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20b78df5: reopening flushed file at 1731969944764 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b60ae5c: reopening flushed file at 1731969944769 (+5 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 545ms, sequenceid=125, compaction requested=false at 1731969944776 (+7 ms)Writing region close event to WAL at 1731969944779 (+3 ms)Closed at 1731969944779 2024-11-18T22:45:44,779 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,780 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,780 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,780 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,780 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:44,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43435 is added to blk_1073741830_1006 (size=61320) 2024-11-18T22:45:44,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39621 is added to blk_1073741830_1006 (size=61320) 2024-11-18T22:45:44,782 INFO [M:0;3b0ca91ed64a:41377 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-18T22:45:44,782 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T22:45:44,782 INFO [M:0;3b0ca91ed64a:41377 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41377 2024-11-18T22:45:44,782 INFO [M:0;3b0ca91ed64a:41377 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:45:44,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:44,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:45:44,892 INFO [M:0;3b0ca91ed64a:41377 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:45:44,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41377-0x101501f46cc0000, quorum=127.0.0.1:51879, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:45:44,895 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@33cca936{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:45:44,896 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f9d87e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:45:44,896 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:45:44,896 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bdfb606{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:45:44,896 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9bd0638{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/hadoop.log.dir/,STOPPED} 2024-11-18T22:45:44,898 WARN [BP-1003182609-172.17.0.2-1731969892022 heartbeating to localhost/127.0.0.1:41169 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:45:44,898 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:45:44,898 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:45:44,898 WARN [BP-1003182609-172.17.0.2-1731969892022 heartbeating to localhost/127.0.0.1:41169 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1003182609-172.17.0.2-1731969892022 (Datanode Uuid 2cde33cc-72ad-4f80-8284-45620e735d2d) service to localhost/127.0.0.1:41169 2024-11-18T22:45:44,899 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee/data/data3/current/BP-1003182609-172.17.0.2-1731969892022 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:45:44,899 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee/data/data4/current/BP-1003182609-172.17.0.2-1731969892022 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:45:44,899 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:45:44,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1370e23f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:45:44,901 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2cf5c358{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:45:44,901 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:45:44,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@722ee419{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:45:44,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f5cb74f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/hadoop.log.dir/,STOPPED} 2024-11-18T22:45:44,903 WARN [BP-1003182609-172.17.0.2-1731969892022 heartbeating to localhost/127.0.0.1:41169 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:45:44,903 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-18T22:45:44,903 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:45:44,903 WARN [BP-1003182609-172.17.0.2-1731969892022 heartbeating to localhost/127.0.0.1:41169 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1003182609-172.17.0.2-1731969892022 (Datanode Uuid 9415e542-9dc7-4dee-a16f-5d5c5eabb9a1) service to localhost/127.0.0.1:41169 2024-11-18T22:45:44,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee/data/data1/current/BP-1003182609-172.17.0.2-1731969892022 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:45:44,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/cluster_e6201b81-0b9d-d443-2822-e36d949cc7ee/data/data2/current/BP-1003182609-172.17.0.2-1731969892022 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:45:44,904 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:45:44,911 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b89aa23{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:45:44,913 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1cbb9cb0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:45:44,913 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:45:44,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1215a9cb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:45:44,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55409da5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/hadoop.log.dir/,STOPPED} 2024-11-18T22:45:44,926 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T22:45:44,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T22:45:44,979 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=232 (was 204) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41169 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:41169 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41169 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:41169 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:41169 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41169 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41169 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/3b0ca91ed64a:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41169 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=518 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=757 (was 949), ProcessCount=11 (was 11), AvailableMemoryMB=4538 (was 5353) 2024-11-18T22:45:44,988 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=232, OpenFileDescriptor=518, MaxFileDescriptor=1048576, SystemLoadAverage=757, ProcessCount=11, AvailableMemoryMB=4539 2024-11-18T22:45:44,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-18T22:45:44,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/hadoop.log.dir so I do NOT create it in target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605 2024-11-18T22:45:44,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0f36fc38-96b2-ec1b-b1aa-3f83e849ff46/hadoop.tmp.dir so I do NOT create it in target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605 2024-11-18T22:45:44,988 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db, deleteOnExit=true 2024-11-18T22:45:44,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-18T22:45:44,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/test.cache.data in system properties and HBase conf 2024-11-18T22:45:44,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/hadoop.tmp.dir in system properties and HBase conf 2024-11-18T22:45:44,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/hadoop.log.dir in system properties and HBase conf 2024-11-18T22:45:44,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-18T22:45:44,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-18T22:45:44,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-18T22:45:44,989 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-18T22:45:44,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:45:44,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/nfs.dump.dir in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/java.io.tmpdir in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-18T22:45:44,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-18T22:45:45,006 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:45:45,087 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:45,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:45,361 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:45:45,365 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:45:45,366 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:45:45,366 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:45:45,366 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:45:45,367 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:45:45,367 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fbdffd4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:45:45,368 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35fc983f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:45:45,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2a54ad5a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/java.io.tmpdir/jetty-localhost-43393-hadoop-hdfs-3_4_1-tests_jar-_-any-16912664478012626910/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:45:45,480 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@561d915{HTTP/1.1, (http/1.1)}{localhost:43393} 2024-11-18T22:45:45,480 INFO [Time-limited test {}] server.Server(415): Started @339280ms 2024-11-18T22:45:45,495 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-18T22:45:45,849 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:45,874 INFO [regionserver/3b0ca91ed64a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-18T22:45:45,912 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:45:45,915 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:45:45,916 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:45:45,916 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:45:45,916 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-18T22:45:45,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7583bc54{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:45:45,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6876746e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:45:46,024 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b3ad3e3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/java.io.tmpdir/jetty-localhost-46553-hadoop-hdfs-3_4_1-tests_jar-_-any-15372816575442918274/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:45:46,024 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7fa9909e{HTTP/1.1, (http/1.1)}{localhost:46553} 2024-11-18T22:45:46,024 INFO [Time-limited test {}] server.Server(415): Started @339824ms 2024-11-18T22:45:46,025 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:45:46,050 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-18T22:45:46,052 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-18T22:45:46,053 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-18T22:45:46,053 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-18T22:45:46,053 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-18T22:45:46,053 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a747f73{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/hadoop.log.dir/,AVAILABLE} 2024-11-18T22:45:46,053 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40ff6368{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-18T22:45:46,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:46,159 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6566813b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/java.io.tmpdir/jetty-localhost-43175-hadoop-hdfs-3_4_1-tests_jar-_-any-903604470517269035/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:45:46,159 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@73d1af03{HTTP/1.1, (http/1.1)}{localhost:43175} 2024-11-18T22:45:46,160 INFO [Time-limited test {}] server.Server(415): Started @339959ms 2024-11-18T22:45:46,161 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-18T22:45:46,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:46,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:47,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:47,186 WARN [Thread-2528 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db/data/data1/current/BP-1711530157-172.17.0.2-1731969945010/current, will proceed with Du for space computation calculation, 2024-11-18T22:45:47,186 WARN [Thread-2529 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db/data/data2/current/BP-1711530157-172.17.0.2-1731969945010/current, will proceed with Du for space computation calculation, 2024-11-18T22:45:47,217 WARN [Thread-2492 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-18T22:45:47,219 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f23a087170ec093 with lease ID 0xf6cc35af37d87326: Processing first storage report for DS-4ca2fb20-b445-4562-84dd-8b6349ba8bf6 from datanode DatanodeRegistration(127.0.0.1:39167, datanodeUuid=581da9ea-0637-42ca-93e4-da24997accd6, infoPort=34017, infoSecurePort=0, ipcPort=39503, storageInfo=lv=-57;cid=testClusterID;nsid=1209302509;c=1731969945010) 2024-11-18T22:45:47,219 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f23a087170ec093 with lease ID 0xf6cc35af37d87326: from storage DS-4ca2fb20-b445-4562-84dd-8b6349ba8bf6 node DatanodeRegistration(127.0.0.1:39167, datanodeUuid=581da9ea-0637-42ca-93e4-da24997accd6, infoPort=34017, infoSecurePort=0, ipcPort=39503, storageInfo=lv=-57;cid=testClusterID;nsid=1209302509;c=1731969945010), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-18T22:45:47,219 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9f23a087170ec093 with lease ID 0xf6cc35af37d87326: Processing first storage report for DS-5d80d756-bd24-4bc7-8ac2-1aa36c6e99c4 from datanode DatanodeRegistration(127.0.0.1:39167, datanodeUuid=581da9ea-0637-42ca-93e4-da24997accd6, infoPort=34017, infoSecurePort=0, ipcPort=39503, storageInfo=lv=-57;cid=testClusterID;nsid=1209302509;c=1731969945010) 2024-11-18T22:45:47,219 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9f23a087170ec093 with lease ID 0xf6cc35af37d87326: from storage DS-5d80d756-bd24-4bc7-8ac2-1aa36c6e99c4 node DatanodeRegistration(127.0.0.1:39167, datanodeUuid=581da9ea-0637-42ca-93e4-da24997accd6, infoPort=34017, infoSecurePort=0, ipcPort=39503, storageInfo=lv=-57;cid=testClusterID;nsid=1209302509;c=1731969945010), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:45:47,227 WARN [Thread-2539 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db/data/data3/current/BP-1711530157-172.17.0.2-1731969945010/current, will proceed with Du for space computation calculation, 2024-11-18T22:45:47,227 WARN [Thread-2540 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db/data/data4/current/BP-1711530157-172.17.0.2-1731969945010/current, will proceed with Du for space computation calculation, 2024-11-18T22:45:47,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-18T22:45:47,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-18T22:45:47,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-18T22:45:47,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-18T22:45:47,249 WARN [Thread-2515 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-18T22:45:47,251 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x229036f91f5e9fab with lease ID 0xf6cc35af37d87327: Processing first storage report for DS-3191b3b1-120b-47a8-a683-fd22c1bd9419 from datanode DatanodeRegistration(127.0.0.1:42995, datanodeUuid=559de490-eefb-46c3-b799-6eb4955e6239, infoPort=34577, infoSecurePort=0, ipcPort=45437, storageInfo=lv=-57;cid=testClusterID;nsid=1209302509;c=1731969945010) 2024-11-18T22:45:47,251 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x229036f91f5e9fab with lease ID 0xf6cc35af37d87327: from storage DS-3191b3b1-120b-47a8-a683-fd22c1bd9419 node DatanodeRegistration(127.0.0.1:42995, datanodeUuid=559de490-eefb-46c3-b799-6eb4955e6239, infoPort=34577, infoSecurePort=0, ipcPort=45437, storageInfo=lv=-57;cid=testClusterID;nsid=1209302509;c=1731969945010), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:45:47,251 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x229036f91f5e9fab with lease ID 0xf6cc35af37d87327: Processing first storage report for DS-ef8d9654-0f13-40a4-ba9c-13326334938a from datanode DatanodeRegistration(127.0.0.1:42995, datanodeUuid=559de490-eefb-46c3-b799-6eb4955e6239, infoPort=34577, infoSecurePort=0, ipcPort=45437, storageInfo=lv=-57;cid=testClusterID;nsid=1209302509;c=1731969945010) 2024-11-18T22:45:47,251 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x229036f91f5e9fab with lease ID 0xf6cc35af37d87327: from storage DS-ef8d9654-0f13-40a4-ba9c-13326334938a node DatanodeRegistration(127.0.0.1:42995, datanodeUuid=559de490-eefb-46c3-b799-6eb4955e6239, infoPort=34577, infoSecurePort=0, ipcPort=45437, storageInfo=lv=-57;cid=testClusterID;nsid=1209302509;c=1731969945010), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-18T22:45:47,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:47,291 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605 2024-11-18T22:45:47,294 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db/zookeeper_0, clientPort=63179, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-18T22:45:47,294 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63179 2024-11-18T22:45:47,294 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:45:47,296 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:45:47,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:45:47,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741825_1001 (size=7) 2024-11-18T22:45:47,306 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56 with version=8 2024-11-18T22:45:47,306 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45791/user/jenkins/test-data/dac4efd0-15d1-8e12-2a2c-0cdca71e5f72/hbase-staging 2024-11-18T22:45:47,308 INFO [Time-limited test {}] 
client.ConnectionUtils(128): master/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:45:47,308 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:45:47,308 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:45:47,308 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:45:47,308 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:45:47,308 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:45:47,308 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-18T22:45:47,308 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:45:47,312 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46767 2024-11-18T22:45:47,314 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46767 connecting to ZooKeeper ensemble=127.0.0.1:63179 2024-11-18T22:45:47,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:467670x0, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:45:47,363 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46767-0x101502012480000 connected 2024-11-18T22:45:47,444 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:45:47,446 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:45:47,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:45:47,450 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56, hbase.cluster.distributed=false 2024-11-18T22:45:47,451 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:45:47,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46767 2024-11-18T22:45:47,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46767 2024-11-18T22:45:47,452 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46767 2024-11-18T22:45:47,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46767 2024-11-18T22:45:47,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46767 2024-11-18T22:45:47,472 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/3b0ca91ed64a:0 server-side Connection retries=45 2024-11-18T22:45:47,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:45:47,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-18T22:45:47,472 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-18T22:45:47,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-18T22:45:47,472 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-18T22:45:47,472 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-18T22:45:47,472 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-18T22:45:47,473 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33405 2024-11-18T22:45:47,474 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33405 connecting to ZooKeeper ensemble=127.0.0.1:63179 2024-11-18T22:45:47,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:45:47,476 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:45:47,486 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334050x0, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-18T22:45:47,486 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:334050x0, quorum=127.0.0.1:63179, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:45:47,486 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33405-0x101502012480001 connected 2024-11-18T22:45:47,487 INFO [Time-limited test {}] 
hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-18T22:45:47,489 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-18T22:45:47,490 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-18T22:45:47,491 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-18T22:45:47,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33405 2024-11-18T22:45:47,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33405 2024-11-18T22:45:47,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33405 2024-11-18T22:45:47,493 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33405 2024-11-18T22:45:47,493 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33405 2024-11-18T22:45:47,505 DEBUG [M:0;3b0ca91ed64a:46767 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;3b0ca91ed64a:46767 2024-11-18T22:45:47,505 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/3b0ca91ed64a,46767,1731969947307 2024-11-18T22:45:47,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:45:47,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:45:47,518 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/3b0ca91ed64a,46767,1731969947307 2024-11-18T22:45:47,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:47,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-18T22:45:47,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:47,528 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46767-0x101502012480000, quorum=127.0.0.1:63179, 
baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-18T22:45:47,529 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/3b0ca91ed64a,46767,1731969947307 from backup master directory 2024-11-18T22:45:47,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:45:47,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/3b0ca91ed64a,46767,1731969947307 2024-11-18T22:45:47,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-18T22:45:47,538 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T22:45:47,538 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=3b0ca91ed64a,46767,1731969947307 2024-11-18T22:45:47,543 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/hbase.id] with ID: 96331727-e74e-4983-808e-2c5bb264dc02 2024-11-18T22:45:47,543 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/.tmp/hbase.id 2024-11-18T22:45:47,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:45:47,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741826_1002 (size=42) 2024-11-18T22:45:47,561 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/.tmp/hbase.id]:[hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/hbase.id] 2024-11-18T22:45:47,572 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:45:47,572 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-18T22:45:47,573 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
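[Editor's note] The entries just above record the active master writing its cluster ID file by first writing to a temporary location under `.tmp` and only then moving it to its final path `hbase.id`. As a rough, hedged illustration of that write-then-rename pattern (this is not the actual `org.apache.hadoop.hbase.util.FSUtils` code; the class name, method, and paths below are placeholders), a minimal sketch with the Hadoop `FileSystem` API might look like:

```java
// Illustrative sketch only: write the cluster ID to a temp file, then rename it
// into place so readers never observe a partially written hbase.id.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void writeClusterId(FileSystem fs, Path rootDir, String clusterId)
      throws IOException {
    Path tmp = new Path(new Path(rootDir, ".tmp"), "hbase.id"); // temporary location
    Path target = new Path(rootDir, "hbase.id");                // final location
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));    // fully write the ID first
    }
    // Move the complete file into place, mirroring the "Move the temporary
    // cluster ID file to its target location" step seen in the log.
    if (!fs.rename(tmp, target)) {
      throw new IOException("Could not move " + tmp + " to " + target);
    }
  }
}
```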
2024-11-18T22:45:47,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:47,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:47,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:45:47,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741827_1003 (size=196) 2024-11-18T22:45:47,590 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-18T22:45:47,591 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-18T22:45:47,592 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:45:47,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:45:47,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741828_1004 (size=1189) 2024-11-18T22:45:47,599 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store 2024-11-18T22:45:47,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:45:47,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741829_1005 (size=34) 2024-11-18T22:45:47,609 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:45:47,609 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:45:47,609 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:45:47,609 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:45:47,609 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:45:47,609 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:45:47,609 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
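[Editor's note] The descriptor dumped above for the local `master:store` region lists four column families (`info`, `proc`, `rs`, `state`) with per-family attributes such as VERSIONS, BLOCKSIZE, BLOOMFILTER, and DATA_BLOCK_ENCODING. Purely as an illustration of how such a descriptor is expressed with the public client API (only the `info` family is shown; the class name is a placeholder and all other attributes are left at defaults), a sketch could be:

```java
// Rough sketch of a table descriptor resembling the 'info' family of the
// 'master:store' descriptor printed in the log; not the HBase-internal code.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8192)                                    // BLOCKSIZE => 8 KB
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(info)
        .build();
  }
}
```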
2024-11-18T22:45:47,609 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969947609Disabling compacts and flushes for region at 1731969947609Disabling writes for close at 1731969947609Writing region close event to WAL at 1731969947609Closed at 1731969947609 2024-11-18T22:45:47,610 WARN [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/.initializing 2024-11-18T22:45:47,610 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/WALs/3b0ca91ed64a,46767,1731969947307 2024-11-18T22:45:47,612 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C46767%2C1731969947307, suffix=, logDir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/WALs/3b0ca91ed64a,46767,1731969947307, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/oldWALs, maxLogs=10 2024-11-18T22:45:47,612 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C46767%2C1731969947307.1731969947612 2024-11-18T22:45:47,619 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/WALs/3b0ca91ed64a,46767,1731969947307/3b0ca91ed64a%2C46767%2C1731969947307.1731969947612 2024-11-18T22:45:47,624 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34017:34017),(127.0.0.1/127.0.0.1:34577:34577)] 2024-11-18T22:45:47,628 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:45:47,628 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:45:47,628 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,628 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-18T22:45:47,631 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:47,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:45:47,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-18T22:45:47,633 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:47,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:45:47,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-18T22:45:47,635 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:47,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:45:47,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,636 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-18T22:45:47,636 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:47,636 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-18T22:45:47,637 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,637 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,638 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,639 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,639 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,639 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-18T22:45:47,640 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-18T22:45:47,642 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:45:47,642 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=776483, jitterRate=-0.012650996446609497}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-18T22:45:47,643 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731969947629Initializing all the Stores at 1731969947629Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969947629Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969947630 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969947630Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969947630Cleaning up temporary data from old regions at 1731969947639 (+9 ms)Region opened successfully at 1731969947643 (+4 ms) 2024-11-18T22:45:47,643 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-18T22:45:47,646 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d6e2b06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:45:47,647 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-18T22:45:47,647 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-18T22:45:47,647 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-18T22:45:47,647 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-18T22:45:47,648 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-18T22:45:47,648 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-18T22:45:47,648 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-18T22:45:47,651 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-18T22:45:47,652 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-18T22:45:47,661 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-18T22:45:47,662 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-18T22:45:47,662 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-18T22:45:47,672 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-18T22:45:47,672 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-18T22:45:47,673 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-18T22:45:47,686 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-18T22:45:47,687 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-18T22:45:47,696 DEBUG 
[master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-18T22:45:47,699 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-18T22:45:47,707 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-18T22:45:47,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:45:47,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:47,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-18T22:45:47,718 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:47,719 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=3b0ca91ed64a,46767,1731969947307, sessionid=0x101502012480000, setting cluster-up flag (Was=false) 2024-11-18T22:45:47,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:47,738 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:47,791 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-18T22:45:47,792 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,46767,1731969947307 2024-11-18T22:45:47,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:47,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:47,844 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-18T22:45:47,845 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=3b0ca91ed64a,46767,1731969947307 2024-11-18T22:45:47,846 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-18T22:45:47,848 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-18T22:45:47,848 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-18T22:45:47,848 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-18T22:45:47,848 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 3b0ca91ed64a,46767,1731969947307 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-18T22:45:47,849 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:45:47,849 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:45:47,850 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:45:47,850 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=5, maxPoolSize=5 2024-11-18T22:45:47,850 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/3b0ca91ed64a:0, corePoolSize=10, maxPoolSize=10 2024-11-18T22:45:47,850 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,850 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:45:47,850 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/3b0ca91ed64a:0, corePoolSize=1, 
maxPoolSize=1 2024-11-18T22:45:47,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:47,851 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731969977851 2024-11-18T22:45:47,851 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-18T22:45:47,851 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-18T22:45:47,851 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-18T22:45:47,851 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-18T22:45:47,851 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-18T22:45:47,851 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-18T22:45:47,851 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:47,852 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-18T22:45:47,852 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-18T22:45:47,852 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-18T22:45:47,852 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-18T22:45:47,852 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-18T22:45:47,856 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969947852,5,FailOnTimeoutGroup] 2024-11-18T22:45:47,857 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:45:47,857 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-18T22:45:47,858 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:47,858 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-18T22:45:47,860 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969947856,5,FailOnTimeoutGroup] 2024-11-18T22:45:47,860 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:47,860 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-18T22:45:47,860 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:47,860 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-18T22:45:47,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:45:47,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741831_1007 (size=1321) 2024-11-18T22:45:47,878 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-18T22:45:47,879 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56 2024-11-18T22:45:47,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:45:47,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741832_1008 (size=32) 2024-11-18T22:45:47,887 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:45:47,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:45:47,890 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:45:47,890 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:47,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:45:47,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:45:47,891 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:45:47,891 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:47,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:45:47,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:45:47,892 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:45:47,892 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:47,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:45:47,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:45:47,894 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:45:47,894 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:47,895 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(746): ClusterId : 96331727-e74e-4983-808e-2c5bb264dc02 2024-11-18T22:45:47,895 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-18T22:45:47,895 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:45:47,895 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:45:47,895 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/1588230740 2024-11-18T22:45:47,895 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/1588230740 2024-11-18T22:45:47,896 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:45:47,897 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:45:47,897 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-18T22:45:47,898 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:45:47,900 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-18T22:45:47,901 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688882, jitterRate=-0.1240418404340744}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:45:47,901 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731969947888Initializing all the Stores at 1731969947888Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969947888Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969947888Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969947888Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969947888Cleaning up temporary data from old regions at 1731969947897 (+9 ms)Region opened successfully at 1731969947901 (+4 ms) 2024-11-18T22:45:47,901 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:45:47,902 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:45:47,902 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:45:47,902 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:45:47,902 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:45:47,902 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-18T22:45:47,902 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969947901Disabling compacts and flushes for region at 1731969947901Disabling writes for close at 1731969947902 (+1 ms)Writing region close 
event to WAL at 1731969947902Closed at 1731969947902 2024-11-18T22:45:47,903 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:45:47,903 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-18T22:45:47,903 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-18T22:45:47,904 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:45:47,905 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-18T22:45:47,908 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-18T22:45:47,908 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-18T22:45:47,918 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-18T22:45:47,919 DEBUG [RS:0;3b0ca91ed64a:33405 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21cf50bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=3b0ca91ed64a/172.17.0.2:0 2024-11-18T22:45:47,932 DEBUG [RS:0;3b0ca91ed64a:33405 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;3b0ca91ed64a:33405 2024-11-18T22:45:47,932 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-18T22:45:47,932 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-18T22:45:47,932 DEBUG [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-18T22:45:47,933 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(2659): reportForDuty to master=3b0ca91ed64a,46767,1731969947307 with port=33405, startcode=1731969947471 2024-11-18T22:45:47,933 DEBUG [RS:0;3b0ca91ed64a:33405 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-18T22:45:47,935 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53101, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-18T22:45:47,935 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46767 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 3b0ca91ed64a,33405,1731969947471 2024-11-18T22:45:47,935 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46767 {}] master.ServerManager(517): Registering regionserver=3b0ca91ed64a,33405,1731969947471 2024-11-18T22:45:47,937 DEBUG [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56 2024-11-18T22:45:47,937 DEBUG [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40849 2024-11-18T22:45:47,937 DEBUG [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-18T22:45:47,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-18T22:45:47,949 DEBUG [RS:0;3b0ca91ed64a:33405 {}] zookeeper.ZKUtil(111): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/3b0ca91ed64a,33405,1731969947471 2024-11-18T22:45:47,949 WARN [RS:0;3b0ca91ed64a:33405 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-18T22:45:47,949 INFO [RS:0;3b0ca91ed64a:33405 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:45:47,950 DEBUG [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/WALs/3b0ca91ed64a,33405,1731969947471 2024-11-18T22:45:47,950 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [3b0ca91ed64a,33405,1731969947471] 2024-11-18T22:45:47,953 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-18T22:45:47,954 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-18T22:45:47,955 INFO [RS:0;3b0ca91ed64a:33405 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-18T22:45:47,955 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-18T22:45:47,955 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-18T22:45:47,956 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-18T22:45:47,956 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=2, maxPoolSize=2 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/3b0ca91ed64a:0, corePoolSize=1, maxPoolSize=1 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:45:47,956 DEBUG [RS:0;3b0ca91ed64a:33405 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/3b0ca91ed64a:0, corePoolSize=3, maxPoolSize=3 2024-11-18T22:45:47,957 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-18T22:45:47,957 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:47,957 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:47,957 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:47,957 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:47,957 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,33405,1731969947471-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:45:47,974 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-18T22:45:47,974 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,33405,1731969947471-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:47,974 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:47,974 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.Replication(171): 3b0ca91ed64a,33405,1731969947471 started 2024-11-18T22:45:47,991 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:47,991 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(1482): Serving as 3b0ca91ed64a,33405,1731969947471, RpcServer on 3b0ca91ed64a/172.17.0.2:33405, sessionid=0x101502012480001 2024-11-18T22:45:47,992 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-18T22:45:47,992 DEBUG [RS:0;3b0ca91ed64a:33405 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 3b0ca91ed64a,33405,1731969947471 2024-11-18T22:45:47,992 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,33405,1731969947471' 2024-11-18T22:45:47,992 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-18T22:45:47,992 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-18T22:45:47,993 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-18T22:45:47,993 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-18T22:45:47,993 DEBUG [RS:0;3b0ca91ed64a:33405 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 3b0ca91ed64a,33405,1731969947471 2024-11-18T22:45:47,993 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '3b0ca91ed64a,33405,1731969947471' 2024-11-18T22:45:47,993 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-18T22:45:47,993 DEBUG 
[RS:0;3b0ca91ed64a:33405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-18T22:45:47,993 DEBUG [RS:0;3b0ca91ed64a:33405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-18T22:45:47,993 INFO [RS:0;3b0ca91ed64a:33405 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-18T22:45:47,993 INFO [RS:0;3b0ca91ed64a:33405 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-18T22:45:48,055 WARN [3b0ca91ed64a:46767 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-18T22:45:48,088 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:48,095 INFO [RS:0;3b0ca91ed64a:33405 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C33405%2C1731969947471, suffix=, logDir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/WALs/3b0ca91ed64a,33405,1731969947471, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/oldWALs, maxLogs=32 2024-11-18T22:45:48,096 INFO [RS:0;3b0ca91ed64a:33405 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C33405%2C1731969947471.1731969948096 2024-11-18T22:45:48,102 INFO [RS:0;3b0ca91ed64a:33405 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/WALs/3b0ca91ed64a,33405,1731969947471/3b0ca91ed64a%2C33405%2C1731969947471.1731969948096 2024-11-18T22:45:48,103 DEBUG [RS:0;3b0ca91ed64a:33405 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34577:34577),(127.0.0.1/127.0.0.1:34017:34017)] 2024-11-18T22:45:48,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-18T22:45:48,305 DEBUG [3b0ca91ed64a:46767 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-18T22:45:48,306 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=3b0ca91ed64a,33405,1731969947471 2024-11-18T22:45:48,307 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,33405,1731969947471, state=OPENING 2024-11-18T22:45:48,317 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-18T22:45:48,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:48,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:48,328 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-18T22:45:48,329 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:45:48,329 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,33405,1731969947471}] 2024-11-18T22:45:48,330 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:45:48,482 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-18T22:45:48,484 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54707, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-18T22:45:48,487 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-18T22:45:48,487 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:45:48,488 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=3b0ca91ed64a%2C33405%2C1731969947471.meta, suffix=.meta, logDir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/WALs/3b0ca91ed64a,33405,1731969947471, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/oldWALs, maxLogs=32 2024-11-18T22:45:48,489 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 3b0ca91ed64a%2C33405%2C1731969947471.meta.1731969948489.meta 2024-11-18T22:45:48,493 INFO 
[RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/WALs/3b0ca91ed64a,33405,1731969947471/3b0ca91ed64a%2C33405%2C1731969947471.meta.1731969948489.meta 2024-11-18T22:45:48,494 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34577:34577),(127.0.0.1/127.0.0.1:34017:34017)] 2024-11-18T22:45:48,495 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-18T22:45:48,495 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-18T22:45:48,496 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-18T22:45:48,496 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-18T22:45:48,496 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-18T22:45:48,496 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-18T22:45:48,496 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-18T22:45:48,496 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-18T22:45:48,497 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-18T22:45:48,498 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-18T22:45:48,498 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:48,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:45:48,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-18T22:45:48,499 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-18T22:45:48,499 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:48,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:45:48,500 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-18T22:45:48,501 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-18T22:45:48,501 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:48,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:45:48,501 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-18T22:45:48,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-18T22:45:48,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-18T22:45:48,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-18T22:45:48,502 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-18T22:45:48,503 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/1588230740 2024-11-18T22:45:48,504 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/1588230740 2024-11-18T22:45:48,506 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-18T22:45:48,506 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-18T22:45:48,506 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-18T22:45:48,508 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-18T22:45:48,509 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783598, jitterRate=-0.003604710102081299}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-18T22:45:48,509 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-18T22:45:48,509 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731969948496Writing region info on filesystem at 1731969948496Initializing all the Stores at 1731969948497 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969948497Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969948497Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731969948497Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731969948497Cleaning up temporary data from old regions at 1731969948506 (+9 ms)Running coprocessor post-open hooks at 1731969948509 (+3 ms)Region opened successfully at 1731969948509 2024-11-18T22:45:48,510 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731969948482 2024-11-18T22:45:48,513 DEBUG [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-18T22:45:48,513 INFO [RS_OPEN_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-18T22:45:48,514 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=3b0ca91ed64a,33405,1731969947471 2024-11-18T22:45:48,514 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 3b0ca91ed64a,33405,1731969947471, state=OPEN 2024-11-18T22:45:48,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:45:48,556 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,33405,1731969947471 2024-11-18T22:45:48,556 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-18T22:45:48,557 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:45:48,557 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-18T22:45:48,559 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-18T22:45:48,559 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=3b0ca91ed64a,33405,1731969947471 in 228 msec 2024-11-18T22:45:48,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-18T22:45:48,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 656 msec 2024-11-18T22:45:48,562 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-18T22:45:48,562 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-18T22:45:48,563 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:45:48,563 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,33405,1731969947471, seqNum=-1] 2024-11-18T22:45:48,563 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:45:48,564 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50223, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:45:48,569 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 721 msec 2024-11-18T22:45:48,569 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731969948569, completionTime=-1 2024-11-18T22:45:48,569 INFO 
[master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-18T22:45:48,569 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-18T22:45:48,571 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-18T22:45:48,571 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731970008571 2024-11-18T22:45:48,571 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731970068571 2024-11-18T22:45:48,571 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-18T22:45:48,572 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,46767,1731969947307-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:48,572 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,46767,1731969947307-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:48,572 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,46767,1731969947307-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:48,572 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-3b0ca91ed64a:46767, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:48,572 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:48,572 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:48,575 DEBUG [master/3b0ca91ed64a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-18T22:45:48,578 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.040sec 2024-11-18T22:45:48,579 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-18T22:45:48,579 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-18T22:45:48,579 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-18T22:45:48,579 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-18T22:45:48,579 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-18T22:45:48,579 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,46767,1731969947307-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-18T22:45:48,579 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,46767,1731969947307-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-18T22:45:48,586 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-18T22:45:48,586 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-18T22:45:48,587 INFO [master/3b0ca91ed64a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=3b0ca91ed64a,46767,1731969947307-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-18T22:45:48,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@306f20c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:45:48,595 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 3b0ca91ed64a,46767,-1 for getting cluster id 2024-11-18T22:45:48,596 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-18T22:45:48,598 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '96331727-e74e-4983-808e-2c5bb264dc02' 2024-11-18T22:45:48,598 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-18T22:45:48,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "96331727-e74e-4983-808e-2c5bb264dc02" 2024-11-18T22:45:48,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b988b68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:45:48,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [3b0ca91ed64a,46767,-1] 2024-11-18T22:45:48,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-18T22:45:48,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:45:48,600 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33898, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-18T22:45:48,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50098664, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-18T22:45:48,602 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-18T22:45:48,603 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=3b0ca91ed64a,33405,1731969947471, seqNum=-1] 2024-11-18T22:45:48,603 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-18T22:45:48,604 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57316, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-18T22:45:48,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=3b0ca91ed64a,46767,1731969947307 2024-11-18T22:45:48,606 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-18T22:45:48,609 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-18T22:45:48,609 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-18T22:45:48,611 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/WALs/test.com,8080,1, archiveDir=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/oldWALs, maxLogs=32 2024-11-18T22:45:48,612 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731969948611 2024-11-18T22:45:48,616 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/WALs/test.com,8080,1/test.com%2C8080%2C1.1731969948611 2024-11-18T22:45:48,616 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34577:34577),(127.0.0.1/127.0.0.1:34017:34017)] 2024-11-18T22:45:48,617 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731969948617 2024-11-18T22:45:48,621 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:48,621 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:48,621 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:48,621 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:48,621 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:48,622 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/WALs/test.com,8080,1/test.com%2C8080%2C1.1731969948611 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/WALs/test.com,8080,1/test.com%2C8080%2C1.1731969948617 2024-11-18T22:45:48,622 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34017:34017),(127.0.0.1/127.0.0.1:34577:34577)] 2024-11-18T22:45:48,622 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/WALs/test.com,8080,1/test.com%2C8080%2C1.1731969948611 is not closed yet, will try archiving it next time 2024-11-18T22:45:48,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741835_1011 (size=93) 2024-11-18T22:45:48,623 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:48,623 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:48,623 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:48,624 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:48,624 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:48,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741835_1011 (size=93) 2024-11-18T22:45:48,625 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/WALs/test.com,8080,1/test.com%2C8080%2C1.1731969948611 to hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/oldWALs/test.com%2C8080%2C1.1731969948611 2024-11-18T22:45:48,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741836_1012 (size=93) 2024-11-18T22:45:48,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741836_1012 (size=93) 2024-11-18T22:45:48,630 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/oldWALs 2024-11-18T22:45:48,630 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731969948617) 2024-11-18T22:45:48,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-18T22:45:48,630 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-18T22:45:48,630 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:45:48,630 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:45:48,630 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:45:48,630 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-18T22:45:48,630 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-18T22:45:48,631 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1697176013, stopped=false 2024-11-18T22:45:48,631 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=3b0ca91ed64a,46767,1731969947307 2024-11-18T22:45:48,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:45:48,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-18T22:45:48,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:48,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:48,651 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-18T22:45:48,651 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-18T22:45:48,651 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:45:48,651 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:45:48,652 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '3b0ca91ed64a,33405,1731969947471' ***** 2024-11-18T22:45:48,652 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-18T22:45:48,652 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:45:48,652 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-18T22:45:48,652 INFO [RS:0;3b0ca91ed64a:33405 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-18T22:45:48,652 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-18T22:45:48,652 INFO [RS:0;3b0ca91ed64a:33405 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-18T22:45:48,652 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(959): stopping server 3b0ca91ed64a,33405,1731969947471 2024-11-18T22:45:48,652 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-18T22:45:48,652 INFO [RS:0;3b0ca91ed64a:33405 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;3b0ca91ed64a:33405. 
2024-11-18T22:45:48,652 DEBUG [RS:0;3b0ca91ed64a:33405 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-18T22:45:48,652 DEBUG [RS:0;3b0ca91ed64a:33405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-18T22:45:48,652 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-18T22:45:48,653 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-18T22:45:48,653 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-18T22:45:48,653 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-18T22:45:48,653 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-18T22:45:48,653 DEBUG [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-18T22:45:48,653 DEBUG [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:45:48,653 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-18T22:45:48,653 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-18T22:45:48,653 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-18T22:45:48,653 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-18T22:45:48,653 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-18T22:45:48,653 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-18T22:45:48,655 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-18T22:45:48,674 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/1588230740/.tmp/ns/3f3346fa222a41edafadaa0c14ae1e03 is 43, key is default/ns:d/1731969948565/Put/seqid=0 2024-11-18T22:45:48,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741837_1013 (size=5153) 2024-11-18T22:45:48,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741837_1013 (size=5153) 2024-11-18T22:45:48,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/MasterData/WALs/3b0ca91ed64a,36309,1731969733443/3b0ca91ed64a%2C36309%2C1731969733443.1731969735725 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-18T22:45:48,853 DEBUG [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:45:48,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,958 INFO [regionserver/3b0ca91ed64a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-18T22:45:48,958 INFO [regionserver/3b0ca91ed64a:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-18T22:45:48,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,969 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:48,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,053 DEBUG [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-18T22:45:49,080 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/1588230740/.tmp/ns/3f3346fa222a41edafadaa0c14ae1e03 2024-11-18T22:45:49,086 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/1588230740/.tmp/ns/3f3346fa222a41edafadaa0c14ae1e03 as hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/1588230740/ns/3f3346fa222a41edafadaa0c14ae1e03 2024-11-18T22:45:49,089 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,39695,1731969738197/3b0ca91ed64a%2C39695%2C1731969738197.1731969738723 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-18T22:45:49,098 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/1588230740/ns/3f3346fa222a41edafadaa0c14ae1e03, entries=2, sequenceid=6, filesize=5.0 K
2024-11-18T22:45:49,099 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 446ms, sequenceid=6, compaction requested=false
2024-11-18T22:45:49,099 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-18T22:45:49,105 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-11-18T22:45:49,106 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-18T22:45:49,106 INFO [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-18T22:45:49,106 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731969948653Running coprocessor pre-close hooks at 1731969948653Disabling compacts and flushes for region at 1731969948653Disabling writes for close at 1731969948653Obtaining lock to block concurrent updates at 1731969948653Preparing flush snapshotting stores in 1588230740 at 1731969948653Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731969948654 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731969948654Flushing 1588230740/ns: creating writer at 1731969948655 (+1 ms)Flushing 1588230740/ns: appending metadata at 1731969948673 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1731969948673Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@408f49ec: reopening flushed file at 1731969949085 (+412 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 446ms, sequenceid=6, compaction requested=false at 1731969949099 (+14 ms)Writing region close event to WAL at 1731969949101 (+2 ms)Running coprocessor post-close hooks at 1731969949106 (+5 ms)Closed at 1731969949106
2024-11-18T22:45:49,106 DEBUG [RS_CLOSE_META-regionserver/3b0ca91ed64a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-18T22:45:49,254 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(976): stopping server 3b0ca91ed64a,33405,1731969947471; all regions closed.
2024-11-18T22:45:49,254 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T22:45:49,254 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T22:45:49,254 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T22:45:49,254 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T22:45:49,254 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T22:45:49,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741834_1010 (size=1152)
2024-11-18T22:45:49,259 DEBUG [RS:0;3b0ca91ed64a:33405 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/oldWALs
2024-11-18T22:45:49,259 INFO [RS:0;3b0ca91ed64a:33405 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C33405%2C1731969947471.meta:.meta(num 1731969948489)
2024-11-18T22:45:49,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741834_1010 (size=1152)
2024-11-18T22:45:49,263 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T22:45:49,263 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T22:45:49,263 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T22:45:49,263 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T22:45:49,263 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-18T22:45:49,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741833_1009 (size=93)
2024-11-18T22:45:49,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741833_1009 (size=93)
2024-11-18T22:45:49,267 DEBUG [RS:0;3b0ca91ed64a:33405 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/oldWALs
2024-11-18T22:45:49,267 INFO [RS:0;3b0ca91ed64a:33405 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 3b0ca91ed64a%2C33405%2C1731969947471:(num 1731969948096)
2024-11-18T22:45:49,267 DEBUG [RS:0;3b0ca91ed64a:33405 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-18T22:45:49,267 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.LeaseManager(133): Closed leases
2024-11-18T22:45:49,267 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-18T22:45:49,267 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.ChoreService(370): Chore service for: regionserver/3b0ca91ed64a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-18T22:45:49,267 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-18T22:45:49,267 INFO [RS:0;3b0ca91ed64a:33405 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33405
2024-11-18T22:45:49,268 INFO [regionserver/3b0ca91ed64a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-18T22:45:49,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42521/user/jenkins/test-data/c71bbc9d-452d-1c90-8a81-5a5f96389153/WALs/3b0ca91ed64a,35719,1731969734188/3b0ca91ed64a%2C35719%2C1731969734188.meta.1731969737739.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor194.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-18T22:45:49,276 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-18T22:45:49,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/3b0ca91ed64a,33405,1731969947471
2024-11-18T22:45:49,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-18T22:45:49,286 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [3b0ca91ed64a,33405,1731969947471]
2024-11-18T22:45:49,296 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/3b0ca91ed64a,33405,1731969947471 already deleted, retry=false
2024-11-18T22:45:49,296 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 3b0ca91ed64a,33405,1731969947471 expired; onlineServers=0
2024-11-18T22:45:49,296 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '3b0ca91ed64a,46767,1731969947307' *****
2024-11-18T22:45:49,296 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-18T22:45:49,296 INFO [M:0;3b0ca91ed64a:46767 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-18T22:45:49,296 INFO [M:0;3b0ca91ed64a:46767 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-18T22:45:49,297 DEBUG [M:0;3b0ca91ed64a:46767 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-18T22:45:49,297 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-18T22:45:49,297 DEBUG [M:0;3b0ca91ed64a:46767 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-18T22:45:49,297 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969947852 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.large.0-1731969947852,5,FailOnTimeoutGroup]
2024-11-18T22:45:49,297 INFO [M:0;3b0ca91ed64a:46767 {}] hbase.ChoreService(370): Chore service for: master/3b0ca91ed64a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-18T22:45:49,297 INFO [M:0;3b0ca91ed64a:46767 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-18T22:45:49,297 DEBUG [master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969947856 {}] cleaner.HFileCleaner(306): Exit Thread[master/3b0ca91ed64a:0:becomeActiveMaster-HFileCleaner.small.0-1731969947856,5,FailOnTimeoutGroup]
2024-11-18T22:45:49,297 DEBUG [M:0;3b0ca91ed64a:46767 {}] master.HMaster(1795): Stopping service threads
2024-11-18T22:45:49,297 INFO [M:0;3b0ca91ed64a:46767 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-18T22:45:49,297 INFO [M:0;3b0ca91ed64a:46767 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-18T22:45:49,297 INFO [M:0;3b0ca91ed64a:46767 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-18T22:45:49,297 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-18T22:45:49,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-18T22:45:49,308 DEBUG [M:0;3b0ca91ed64a:46767 {}] zookeeper.ZKUtil(347): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-18T22:45:49,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-18T22:45:49,308 WARN [M:0;3b0ca91ed64a:46767 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-18T22:45:49,308 INFO [M:0;3b0ca91ed64a:46767 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/.lastflushedseqids 2024-11-18T22:45:49,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741838_1014 (size=99) 2024-11-18T22:45:49,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741838_1014 (size=99) 2024-11-18T22:45:49,318 INFO [M:0;3b0ca91ed64a:46767 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-18T22:45:49,318 INFO [M:0;3b0ca91ed64a:46767 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-18T22:45:49,318 DEBUG [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-18T22:45:49,318 INFO [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:45:49,318 DEBUG [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:45:49,318 DEBUG [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-18T22:45:49,318 DEBUG [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-18T22:45:49,319 INFO [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-18T22:45:49,335 DEBUG [M:0;3b0ca91ed64a:46767 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8831c7be9e434cf09c9ded936bff3a22 is 82, key is hbase:meta,,1/info:regioninfo/1731969948513/Put/seqid=0 2024-11-18T22:45:49,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741839_1015 (size=5672) 2024-11-18T22:45:49,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741839_1015 (size=5672) 2024-11-18T22:45:49,343 INFO [M:0;3b0ca91ed64a:46767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8831c7be9e434cf09c9ded936bff3a22 2024-11-18T22:45:49,364 DEBUG [M:0;3b0ca91ed64a:46767 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f22eaece1d5540dc926b9bdf4d601ca6 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731969948569/Put/seqid=0 2024-11-18T22:45:49,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741840_1016 (size=5275) 2024-11-18T22:45:49,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741840_1016 (size=5275) 2024-11-18T22:45:49,376 INFO [M:0;3b0ca91ed64a:46767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f22eaece1d5540dc926b9bdf4d601ca6 2024-11-18T22:45:49,386 INFO [RS:0;3b0ca91ed64a:33405 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:45:49,386 INFO [RS:0;3b0ca91ed64a:33405 {}] regionserver.HRegionServer(1031): Exiting; stopping=3b0ca91ed64a,33405,1731969947471; zookeeper connection closed. 
2024-11-18T22:45:49,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:45:49,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33405-0x101502012480001, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:45:49,388 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@16809025 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@16809025 2024-11-18T22:45:49,389 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-18T22:45:49,406 DEBUG [M:0;3b0ca91ed64a:46767 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ed5a3f6a180c4ecd9c70efb0bd27bfb4 is 69, key is 3b0ca91ed64a,33405,1731969947471/rs:state/1731969947936/Put/seqid=0 2024-11-18T22:45:49,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741841_1017 (size=5156) 2024-11-18T22:45:49,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741841_1017 (size=5156) 2024-11-18T22:45:49,415 INFO [M:0;3b0ca91ed64a:46767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ed5a3f6a180c4ecd9c70efb0bd27bfb4 2024-11-18T22:45:49,447 DEBUG [M:0;3b0ca91ed64a:46767 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17b53dad90e04033b4dc1c306ae6cfec is 52, key is load_balancer_on/state:d/1731969948608/Put/seqid=0 2024-11-18T22:45:49,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741842_1018 (size=5056) 2024-11-18T22:45:49,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741842_1018 (size=5056) 2024-11-18T22:45:49,465 INFO [M:0;3b0ca91ed64a:46767 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17b53dad90e04033b4dc1c306ae6cfec 2024-11-18T22:45:49,473 DEBUG [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/8831c7be9e434cf09c9ded936bff3a22 as hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8831c7be9e434cf09c9ded936bff3a22 2024-11-18T22:45:49,479 INFO [M:0;3b0ca91ed64a:46767 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/8831c7be9e434cf09c9ded936bff3a22, entries=8, sequenceid=29, filesize=5.5 K 2024-11-18T22:45:49,480 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-18T22:45:49,481 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,482 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,482 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,482 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,483 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,483 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,483 DEBUG [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f22eaece1d5540dc926b9bdf4d601ca6 as hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f22eaece1d5540dc926b9bdf4d601ca6 2024-11-18T22:45:49,490 INFO [M:0;3b0ca91ed64a:46767 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f22eaece1d5540dc926b9bdf4d601ca6, entries=3, sequenceid=29, filesize=5.2 K 2024-11-18T22:45:49,491 DEBUG [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ed5a3f6a180c4ecd9c70efb0bd27bfb4 as hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ed5a3f6a180c4ecd9c70efb0bd27bfb4 2024-11-18T22:45:49,498 INFO [M:0;3b0ca91ed64a:46767 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ed5a3f6a180c4ecd9c70efb0bd27bfb4, entries=1, sequenceid=29, filesize=5.0 K 2024-11-18T22:45:49,499 DEBUG [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/17b53dad90e04033b4dc1c306ae6cfec as hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/17b53dad90e04033b4dc1c306ae6cfec 2024-11-18T22:45:49,506 INFO [M:0;3b0ca91ed64a:46767 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40849/user/jenkins/test-data/14a73bd1-a640-2232-9675-c080924c9d56/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/17b53dad90e04033b4dc1c306ae6cfec, entries=1, sequenceid=29, filesize=4.9 K 2024-11-18T22:45:49,512 INFO [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 194ms, sequenceid=29, compaction requested=false 2024-11-18T22:45:49,531 INFO [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-18T22:45:49,531 DEBUG [M:0;3b0ca91ed64a:46767 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731969949318Disabling compacts and flushes for region at 1731969949318Disabling writes for close at 1731969949318Obtaining lock to block concurrent updates at 1731969949319 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731969949319Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731969949319Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731969949320 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731969949320Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731969949335 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731969949335Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731969949348 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731969949363 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731969949363Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731969949384 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731969949405 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731969949406 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731969949423 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731969949446 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731969949446Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@189167c6: reopening flushed file at 1731969949472 (+26 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69624d60: reopening flushed file at 1731969949479 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a06ba2: reopening flushed file at 1731969949490 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62347e76: reopening flushed file at 1731969949498 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 194ms, sequenceid=29, compaction requested=false at 1731969949512 (+14 ms)Writing region close event to WAL at 1731969949531 (+19 ms)Closed at 1731969949531 2024-11-18T22:45:49,541 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:49,541 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:49,542 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:49,542 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:49,542 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-18T22:45:49,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42995 is added to blk_1073741830_1006 (size=10311) 2024-11-18T22:45:49,547 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,548 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39167 is added to blk_1073741830_1006 (size=10311) 2024-11-18T22:45:49,548 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,548 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,549 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,549 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,553 INFO [M:0;3b0ca91ed64a:46767 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-18T22:45:49,553 INFO [M:0;3b0ca91ed64a:46767 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46767 2024-11-18T22:45:49,554 INFO [M:0;3b0ca91ed64a:46767 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-18T22:45:49,556 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-18T22:45:49,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,569 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-18T22:45:49,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:45:49,667 INFO [M:0;3b0ca91ed64a:46767 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-18T22:45:49,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46767-0x101502012480000, quorum=127.0.0.1:63179, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-18T22:45:49,670 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6566813b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:45:49,670 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@73d1af03{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:45:49,670 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:45:49,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40ff6368{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:45:49,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a747f73{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/hadoop.log.dir/,STOPPED} 2024-11-18T22:45:49,672 WARN 
[BP-1711530157-172.17.0.2-1731969945010 heartbeating to localhost/127.0.0.1:40849 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:45:49,672 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T22:45:49,672 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:45:49,672 WARN [BP-1711530157-172.17.0.2-1731969945010 heartbeating to localhost/127.0.0.1:40849 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1711530157-172.17.0.2-1731969945010 (Datanode Uuid 559de490-eefb-46c3-b799-6eb4955e6239) service to localhost/127.0.0.1:40849 2024-11-18T22:45:49,673 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db/data/data3/current/BP-1711530157-172.17.0.2-1731969945010 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:45:49,673 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db/data/data4/current/BP-1711530157-172.17.0.2-1731969945010 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:45:49,673 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:45:49,685 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b3ad3e3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-18T22:45:49,686 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7fa9909e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:45:49,686 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:45:49,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6876746e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:45:49,686 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7583bc54{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/hadoop.log.dir/,STOPPED} 2024-11-18T22:45:49,687 WARN [BP-1711530157-172.17.0.2-1731969945010 heartbeating to localhost/127.0.0.1:40849 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-18T22:45:49,687 WARN [BP-1711530157-172.17.0.2-1731969945010 heartbeating to localhost/127.0.0.1:40849 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1711530157-172.17.0.2-1731969945010 (Datanode Uuid 581da9ea-0637-42ca-93e4-da24997accd6) service to localhost/127.0.0.1:40849 2024-11-18T22:45:49,688 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db/data/data2/current/BP-1711530157-172.17.0.2-1731969945010 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:45:49,688 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/cluster_d8e3d33d-0ea4-6a3b-9b7d-266378e774db/data/data1/current/BP-1711530157-172.17.0.2-1731969945010 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-18T22:45:49,688 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-18T22:45:49,688 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-18T22:45:49,688 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-18T22:45:49,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2a54ad5a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-18T22:45:49,694 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@561d915{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-18T22:45:49,694 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-18T22:45:49,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35fc983f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-18T22:45:49,695 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fbdffd4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6ca51ee4-2cc6-6c0b-51b9-fe1d54ec3605/hadoop.log.dir/,STOPPED} 2024-11-18T22:45:49,702 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-18T22:45:49,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-18T22:45:49,730 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=270 (was 232) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:40849 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40849 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:40849 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40849 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40849 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:40849 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (548703531) connection to localhost/127.0.0.1:40849 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40849 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=534 (was 518) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=761 (was 757) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4488 (was 4539)